diff --git a/docs/aiplatform_v1/services_.rst b/docs/aiplatform_v1/services_.rst index 1ad3b87e9a..f9610c33b3 100644 --- a/docs/aiplatform_v1/services_.rst +++ b/docs/aiplatform_v1/services_.rst @@ -31,6 +31,7 @@ Services for Google Cloud Aiplatform v1 API reasoning_engine_execution_service reasoning_engine_service schedule_service + session_service specialist_pool_service tensorboard_service vertex_rag_data_service diff --git a/docs/aiplatform_v1/session_service.rst b/docs/aiplatform_v1/session_service.rst new file mode 100644 index 0000000000..4cd3c5ec43 --- /dev/null +++ b/docs/aiplatform_v1/session_service.rst @@ -0,0 +1,10 @@ +SessionService +-------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.session_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.session_service.pagers + :members: + :inherited-members: diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py index f157bc3c69..30a57c1c07 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py @@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py index b868dd605d..6bd702b541 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py @@ -19,8 +19,8 @@ import proto # type: ignore -from 
google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py index 0bdfcf7759..263b8858d7 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py index 480d78450f..ff14cbd3a8 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py index b79ff794a5..78de47da0f 100644 --- 
a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py @@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py index c43e63609b..e1245a4f71 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py index 69ee6dad2e..451e2d993c 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git 
a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py index 9d0a6ef308..c31fff397e 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index 9c54ee4c48..b7513c500a 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -100,6 +100,8 @@ from .services.reasoning_engine_service import ReasoningEngineServiceAsyncClient from .services.schedule_service import ScheduleServiceClient from .services.schedule_service import ScheduleServiceAsyncClient +from .services.session_service import SessionServiceClient +from .services.session_service import SessionServiceAsyncClient from .services.specialist_pool_service import SpecialistPoolServiceClient from .services.specialist_pool_service import SpecialistPoolServiceAsyncClient from .services.tensorboard_service import TensorboardServiceClient @@ -563,6 +565,8 @@ from .types.io import SharePointSources from .types.io import SlackSource from .types.io import TFRecordDestination +from .types.io import VertexMultimodalDatasetDestination +from .types.io import VertexMultimodalDatasetSource from .types.job_service import CancelBatchPredictionJobRequest from .types.job_service import CancelCustomJobRequest from .types.job_service import CancelDataLabelingJobRequest @@ -903,6 +907,22 
@@ from .types.service_networking import PSCAutomationConfig from .types.service_networking import PscInterfaceConfig from .types.service_networking import PSCAutomationState +from .types.session import EventActions +from .types.session import EventMetadata +from .types.session import Session +from .types.session import SessionEvent +from .types.session import Transcription +from .types.session_service import AppendEventRequest +from .types.session_service import AppendEventResponse +from .types.session_service import CreateSessionOperationMetadata +from .types.session_service import CreateSessionRequest +from .types.session_service import DeleteSessionRequest +from .types.session_service import GetSessionRequest +from .types.session_service import ListEventsRequest +from .types.session_service import ListEventsResponse +from .types.session_service import ListSessionsRequest +from .types.session_service import ListSessionsResponse +from .types.session_service import UpdateSessionRequest from .types.specialist_pool import SpecialistPool from .types.specialist_pool_service import CreateSpecialistPoolOperationMetadata from .types.specialist_pool_service import CreateSpecialistPoolRequest @@ -1213,6 +1233,7 @@ def _get_version(dependency_name): "ReasoningEngineExecutionServiceAsyncClient", "ReasoningEngineServiceAsyncClient", "ScheduleServiceAsyncClient", + "SessionServiceAsyncClient", "SpecialistPoolServiceAsyncClient", "TensorboardServiceAsyncClient", "VertexRagDataServiceAsyncClient", @@ -1230,6 +1251,8 @@ def _get_version(dependency_name): "Annotation", "AnnotationSpec", "ApiAuth", + "AppendEventRequest", + "AppendEventResponse", "Artifact", "AssignNotebookRuntimeOperationMetadata", "AssignNotebookRuntimeRequest", @@ -1369,6 +1392,8 @@ def _get_version(dependency_name): "CreateReasoningEngineRequest", "CreateRegistryFeatureOperationMetadata", "CreateScheduleRequest", + "CreateSessionOperationMetadata", + "CreateSessionRequest", 
"CreateSpecialistPoolOperationMetadata", "CreateSpecialistPoolRequest", "CreateStudyRequest", @@ -1432,6 +1457,7 @@ def _get_version(dependency_name): "DeleteReasoningEngineRequest", "DeleteSavedQueryRequest", "DeleteScheduleRequest", + "DeleteSessionRequest", "DeleteSpecialistPoolRequest", "DeleteStudyRequest", "DeleteTensorboardExperimentRequest", @@ -1483,6 +1509,8 @@ def _get_version(dependency_name): "EvaluatedAnnotationExplanation", "EvaluationServiceClient", "Event", + "EventActions", + "EventMetadata", "ExactMatchInput", "ExactMatchInstance", "ExactMatchMetricValue", @@ -1617,6 +1645,7 @@ def _get_version(dependency_name): "GetRagFileRequest", "GetReasoningEngineRequest", "GetScheduleRequest", + "GetSessionRequest", "GetSpecialistPoolRequest", "GetStudyRequest", "GetTensorboardExperimentRequest", @@ -1693,6 +1722,8 @@ def _get_version(dependency_name): "ListEndpointsResponse", "ListEntityTypesRequest", "ListEntityTypesResponse", + "ListEventsRequest", + "ListEventsResponse", "ListExecutionsRequest", "ListExecutionsResponse", "ListFeatureGroupsRequest", @@ -1755,6 +1786,8 @@ def _get_version(dependency_name): "ListSavedQueriesResponse", "ListSchedulesRequest", "ListSchedulesResponse", + "ListSessionsRequest", + "ListSessionsResponse", "ListSpecialistPoolsRequest", "ListSpecialistPoolsResponse", "ListStudiesRequest", @@ -2019,6 +2052,9 @@ def _get_version(dependency_name): "SecretRef", "Segment", "ServiceAccountSpec", + "Session", + "SessionEvent", + "SessionServiceClient", "SharePointSources", "ShieldedVmConfig", "SlackSource", @@ -2116,6 +2152,7 @@ def _get_version(dependency_name): "ToolParameterKeyMatchSpec", "TrainingConfig", "TrainingPipeline", + "Transcription", "Trial", "TrialContext", "TunedModel", @@ -2172,6 +2209,7 @@ def _get_version(dependency_name): "UpdateReasoningEngineOperationMetadata", "UpdateReasoningEngineRequest", "UpdateScheduleRequest", + "UpdateSessionRequest", "UpdateSpecialistPoolOperationMetadata", "UpdateSpecialistPoolRequest", 
"UpdateTensorboardExperimentRequest", @@ -2198,6 +2236,8 @@ def _get_version(dependency_name): "Value", "VertexAISearch", "VertexAiSearchConfig", + "VertexMultimodalDatasetDestination", + "VertexMultimodalDatasetSource", "VertexRagDataServiceClient", "VertexRagServiceClient", "VertexRagStore", diff --git a/google/cloud/aiplatform_v1/gapic_metadata.json b/google/cloud/aiplatform_v1/gapic_metadata.json index c5e53304ab..443458f10c 100644 --- a/google/cloud/aiplatform_v1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1/gapic_metadata.json @@ -4572,6 +4572,130 @@ } } }, + "SessionService": { + "clients": { + "grpc": { + "libraryClient": "SessionServiceClient", + "rpcs": { + "AppendEvent": { + "methods": [ + "append_event" + ] + }, + "CreateSession": { + "methods": [ + "create_session" + ] + }, + "DeleteSession": { + "methods": [ + "delete_session" + ] + }, + "GetSession": { + "methods": [ + "get_session" + ] + }, + "ListEvents": { + "methods": [ + "list_events" + ] + }, + "ListSessions": { + "methods": [ + "list_sessions" + ] + }, + "UpdateSession": { + "methods": [ + "update_session" + ] + } + } + }, + "grpc-async": { + "libraryClient": "SessionServiceAsyncClient", + "rpcs": { + "AppendEvent": { + "methods": [ + "append_event" + ] + }, + "CreateSession": { + "methods": [ + "create_session" + ] + }, + "DeleteSession": { + "methods": [ + "delete_session" + ] + }, + "GetSession": { + "methods": [ + "get_session" + ] + }, + "ListEvents": { + "methods": [ + "list_events" + ] + }, + "ListSessions": { + "methods": [ + "list_sessions" + ] + }, + "UpdateSession": { + "methods": [ + "update_session" + ] + } + } + }, + "rest": { + "libraryClient": "SessionServiceClient", + "rpcs": { + "AppendEvent": { + "methods": [ + "append_event" + ] + }, + "CreateSession": { + "methods": [ + "create_session" + ] + }, + "DeleteSession": { + "methods": [ + "delete_session" + ] + }, + "GetSession": { + "methods": [ + "get_session" + ] + }, + "ListEvents": { + "methods": [ + "list_events" + 
] + }, + "ListSessions": { + "methods": [ + "list_sessions" + ] + }, + "UpdateSession": { + "methods": [ + "update_session" + ] + } + } + } + } + }, "SpecialistPoolService": { "clients": { "grpc": { diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/async_client.py b/google/cloud/aiplatform_v1/services/data_foundry_service/async_client.py index f300d40ada..54c5d7dd22 100644 --- a/google/cloud/aiplatform_v1/services/data_foundry_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/async_client.py @@ -116,7 +116,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: DataFoundryServiceAsyncClient: The constructed client. """ - return DataFoundryServiceClient.from_service_account_info.__func__(DataFoundryServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + DataFoundryServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(DataFoundryServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -132,7 +135,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: DataFoundryServiceAsyncClient: The constructed client. 
""" - return DataFoundryServiceClient.from_service_account_file.__func__(DataFoundryServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + DataFoundryServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(DataFoundryServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/base.py b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/base.py index f932ec50d3..0f6ab84fbe 100644 --- a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/base.py @@ -87,8 +87,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -103,11 +101,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_base.py index 58a7c24515..639e07b64a 100644 --- a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_base.py @@ -785,6 +785,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -825,6 +829,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1170,6 +1178,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1230,6 +1242,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1587,6 +1603,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1647,6 +1667,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1988,6 +2012,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2060,6 +2088,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2409,6 +2441,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2469,6 +2505,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py index f321aca6be..46d879d521 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py +++ 
b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.dataset_service import pagers from google.cloud.aiplatform_v1.types import annotation from google.cloud.aiplatform_v1.types import annotation_spec @@ -63,10 +61,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport from .client import DatasetServiceClient @@ -149,7 +149,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: DatasetServiceAsyncClient: The constructed client. 
""" - return DatasetServiceClient.from_service_account_info.__func__(DatasetServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + DatasetServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(DatasetServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -165,7 +168,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: DatasetServiceAsyncClient: The constructed client. """ - return DatasetServiceClient.from_service_account_file.__func__(DatasetServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + DatasetServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(DatasetServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -507,7 +513,6 @@ async def sample_get_dataset(): request (Optional[Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]]): The request object. Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 name (:class:`str`): Required. The name of the Dataset resource. @@ -1704,7 +1709,6 @@ async def sample_get_dataset_version(): request (Optional[Union[google.cloud.aiplatform_v1.types.GetDatasetVersionRequest, dict]]): The request object. Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 name (:class:`str`): Required. The resource name of the Dataset version to delete. Format: @@ -2643,10 +2647,7 @@ async def list_annotations( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAnnotationsAsyncPager: - r"""Lists Annotations belongs to a dataitem - This RPC is only available in InternalDatasetService. 
It - is only used for exporting conversation data to CCAI - Insights. + r"""Lists Annotations belongs to a dataitem. .. code-block:: python diff --git a/google/cloud/aiplatform_v1/services/dataset_service/client.py b/google/cloud/aiplatform_v1/services/dataset_service/client.py index 9e0e03ff4f..ec2ffcab17 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.dataset_service import pagers from google.cloud.aiplatform_v1.types import annotation from google.cloud.aiplatform_v1.types import annotation_spec @@ -79,10 +77,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import DatasetServiceGrpcTransport from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport @@ -1091,7 +1091,6 @@ def sample_get_dataset(): request (Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]): The request object. 
Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 name (str): Required. The name of the Dataset resource. @@ -2261,7 +2260,6 @@ def sample_get_dataset_version(): request (Union[google.cloud.aiplatform_v1.types.GetDatasetVersionRequest, dict]): The request object. Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 name (str): Required. The resource name of the Dataset version to delete. Format: @@ -3177,10 +3175,7 @@ def list_annotations( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAnnotationsPager: - r"""Lists Annotations belongs to a dataitem - This RPC is only available in InternalDatasetService. It - is only used for exporting conversation data to CCAI - Insights. + r"""Lists Annotations belongs to a dataitem. .. code-block:: python diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py index f68e3f97cb..6d63373ac0 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py @@ -93,8 +93,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -109,11 +107,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py index 8410f7f790..baa6b4d9ba 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py @@ -853,10 +853,7 @@ def list_annotations( ]: r"""Return a callable for the list annotations method over gRPC. - Lists Annotations belongs to a dataitem - This RPC is only available in InternalDatasetService. It - is only used for exporting conversation data to CCAI - Insights. + Lists Annotations belongs to a dataitem. Returns: Callable[[~.ListAnnotationsRequest], diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py index 1cdabd6e2f..4ecd09f9ff 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py @@ -880,10 +880,7 @@ def list_annotations( ]: r"""Return a callable for the list annotations method over gRPC. 
- Lists Annotations belongs to a dataitem - This RPC is only available in InternalDatasetService. It - is only used for exporting conversation data to CCAI - Insights. + Lists Annotations belongs to a dataitem. Returns: Callable[[~.ListAnnotationsRequest], diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py index e084918200..639effd652 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py @@ -1837,6 +1837,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1877,6 +1881,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2203,6 +2211,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2263,6 +2275,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2601,6 +2617,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2661,6 +2681,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2983,6 +3007,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3055,6 +3083,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3385,6 +3417,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3445,6 +3481,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3550,7 +3590,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3706,7 +3746,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3854,7 +3894,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4004,7 +4044,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4153,7 +4193,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4309,7 +4349,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4569,7 +4609,6 @@ def __call__( request (~.dataset_service.GetDatasetRequest): The request 
object. Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -4720,7 +4759,6 @@ def __call__( request (~.dataset_service.GetDatasetVersionRequest): The request object. Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -4916,7 +4954,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5824,7 +5862,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py index ddc755b15b..17fff63566 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py @@ -1752,7 +1752,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1918,7 +1918,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2076,7 +2076,7 @@ 
async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2236,7 +2236,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2394,7 +2394,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2562,7 +2562,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2839,7 +2839,6 @@ async def __call__( request (~.dataset_service.GetDatasetRequest): The request object. Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2999,7 +2998,6 @@ async def __call__( request (~.dataset_service.GetDatasetVersionRequest): The request object. Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -3205,7 +3203,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4165,7 +4163,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5056,6 +5054,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5096,6 +5098,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -5422,6 +5428,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5482,6 +5492,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ 
-5820,6 +5834,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5880,6 +5898,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -6202,6 +6224,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -6274,6 +6300,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -6604,6 +6634,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6664,6 +6698,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_base.py index f04b69be2a..599ead0a73 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_base.py @@ -1738,6 +1738,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1778,6 +1782,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -2123,6 +2131,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2183,6 +2195,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2540,6 +2556,10 @@ def _get_http_options(): "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2600,6 +2620,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2941,6 +2965,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3013,6 +3041,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -3362,6 +3394,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3422,6 +3458,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py index 
c8d84733bd..26640fe511 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import pagers from google.cloud.aiplatform_v1.types import deployment_resource_pool from google.cloud.aiplatform_v1.types import ( @@ -61,9 +59,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import DeploymentResourcePoolServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import DeploymentResourcePoolServiceGrpcAsyncIOTransport from .client import DeploymentResourcePoolServiceClient @@ -156,7 +156,12 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: DeploymentResourcePoolServiceAsyncClient: The constructed client. 
""" - return DeploymentResourcePoolServiceClient.from_service_account_info.__func__(DeploymentResourcePoolServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + DeploymentResourcePoolServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func( + DeploymentResourcePoolServiceAsyncClient, info, *args, **kwargs + ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -172,7 +177,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: DeploymentResourcePoolServiceAsyncClient: The constructed client. """ - return DeploymentResourcePoolServiceClient.from_service_account_file.__func__(DeploymentResourcePoolServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + DeploymentResourcePoolServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + DeploymentResourcePoolServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py index 40ef2a4fb4..9c59404b0b 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import pagers from google.cloud.aiplatform_v1.types import deployment_resource_pool from google.cloud.aiplatform_v1.types import ( @@ -77,9 +75,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: 
ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import DeploymentResourcePoolServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import DeploymentResourcePoolServiceGrpcTransport from .transports.grpc_asyncio import DeploymentResourcePoolServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py index 5421fd8c1f..678831a458 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py @@ -89,8 +89,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -105,11 +103,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py index 85e22e88ca..3e1c4c6385 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py @@ -1105,6 +1105,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1145,6 +1149,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1471,6 +1479,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1531,6 +1543,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1869,6 +1885,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1929,6 +1949,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2251,6 +2275,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2323,6 +2351,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2653,6 +2685,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2713,6 +2749,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2824,7 +2864,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2980,7 +3020,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3621,7 +3661,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_asyncio.py index 31a3fa2932..d0de7eb4cf 100644 --- 
a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_asyncio.py @@ -959,7 +959,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1125,7 +1125,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1800,7 +1800,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2204,6 +2204,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2244,6 +2248,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2570,6 +2578,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { 
"method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2630,6 +2642,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2968,6 +2984,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3028,6 +3048,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3350,6 +3374,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3422,6 +3450,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3752,6 +3784,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3812,6 +3848,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_base.py index 962ef0cd72..2effba5534 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_base.py @@ -1057,6 +1057,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1097,6 +1101,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1442,6 +1450,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1502,6 +1514,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1859,6 +1875,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1919,6 +1939,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2260,6 +2284,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2332,6 +2360,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2681,6 +2713,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2741,6 +2777,10 @@ def _get_http_options(): "method": 
"post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py index 2d85152337..f3a2b094ea 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.endpoint_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import endpoint @@ -58,9 +56,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport from .client import EndpointServiceClient @@ -145,7 +145,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: 
EndpointServiceAsyncClient: The constructed client. """ - return EndpointServiceClient.from_service_account_info.__func__(EndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + EndpointServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(EndpointServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -161,7 +164,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: EndpointServiceAsyncClient: The constructed client. """ - return EndpointServiceClient.from_service_account_file.__func__(EndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + EndpointServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(EndpointServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1/services/endpoint_service/client.py index d312180cbb..766cf26442 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.endpoint_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import endpoint @@ -74,9 +72,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # 
type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import EndpointServiceGrpcTransport from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py index e014eef1fa..901393d915 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py @@ -90,8 +90,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -106,11 +104,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py index 6f423f8ea1..2f0afe4e52 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py @@ -1256,6 +1256,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1296,6 +1300,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1622,6 +1630,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1682,6 +1694,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2020,6 +2036,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2080,6 +2100,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2402,6 +2426,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2474,6 +2502,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2804,6 +2836,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2864,6 +2900,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2969,7 +3009,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3117,7 +3157,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3269,7 +3309,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3724,7 +3764,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3877,7 +3917,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4189,7 +4229,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_asyncio.py index 7de4beb77f..46352cdb44 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_asyncio.py @@ -1120,7 
+1120,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1281,7 +1281,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1445,7 +1445,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1931,7 +1931,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2095,7 +2095,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2430,7 +2430,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2834,6 +2834,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2874,6 +2878,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -3200,6 +3208,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3260,6 +3272,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -3598,6 +3614,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3658,6 +3678,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3980,6 +4004,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -4052,6 +4080,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -4382,6 +4414,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4442,6 +4478,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_base.py index 54d9133dd1..3b21ab9c2b 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_base.py @@ -1215,6 +1215,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1255,6 +1259,10 @@ def _get_http_options(): "method": 
"post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1600,6 +1608,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1660,6 +1672,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2017,6 +2033,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2077,6 +2097,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2418,6 +2442,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2490,6 +2518,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", 
}, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2839,6 +2871,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2899,6 +2935,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/async_client.py b/google/cloud/aiplatform_v1/services/evaluation_service/async_client.py index c7449e2df5..b814320cc0 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/async_client.py @@ -114,7 +114,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: EvaluationServiceAsyncClient: The constructed client. """ - return EvaluationServiceClient.from_service_account_info.__func__(EvaluationServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + EvaluationServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(EvaluationServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -130,7 +133,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: EvaluationServiceAsyncClient: The constructed client. 
""" - return EvaluationServiceClient.from_service_account_file.__func__(EvaluationServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + EvaluationServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(EvaluationServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/transports/base.py b/google/cloud/aiplatform_v1/services/evaluation_service/transports/base.py index aabc3400c6..ea412bc6f7 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/transports/base.py @@ -87,8 +87,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -103,11 +101,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest_base.py index f0911dfe1a..82c168b089 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest_base.py @@ -785,6 +785,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -825,6 +829,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1170,6 +1178,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1230,6 +1242,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1587,6 +1603,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, 
{ "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1647,6 +1667,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1988,6 +2012,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2060,6 +2088,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2409,6 +2441,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2469,6 +2505,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py index 7d4818f6b4..68b1202243 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py +++ 
b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.feature_online_store_admin_service import ( pagers, ) @@ -64,11 +62,13 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import interval_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore +import google.type.interval_pb2 as interval_pb2 # type: ignore from .transports.base import ( FeatureOnlineStoreAdminServiceTransport, DEFAULT_CLIENT_INFO, @@ -164,7 +164,12 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: FeatureOnlineStoreAdminServiceAsyncClient: The constructed client. 
""" - return FeatureOnlineStoreAdminServiceClient.from_service_account_info.__func__(FeatureOnlineStoreAdminServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + FeatureOnlineStoreAdminServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func( + FeatureOnlineStoreAdminServiceAsyncClient, info, *args, **kwargs + ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -180,7 +185,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: FeatureOnlineStoreAdminServiceAsyncClient: The constructed client. """ - return FeatureOnlineStoreAdminServiceClient.from_service_account_file.__func__(FeatureOnlineStoreAdminServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + FeatureOnlineStoreAdminServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + FeatureOnlineStoreAdminServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py index fbb24c0d6a..df47e201ba 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.feature_online_store_admin_service import ( pagers, ) @@ -80,11 +78,13 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import 
field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import interval_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore +import google.type.interval_pb2 as interval_pb2 # type: ignore from .transports.base import ( FeatureOnlineStoreAdminServiceTransport, DEFAULT_CLIENT_INFO, diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py index 3209eaf102..74e8bb4e92 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py @@ -91,8 +91,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -107,11 +105,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py index f213a082f7..565c115e71 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py @@ -1514,6 +1514,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1554,6 +1558,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1880,6 +1888,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1940,6 +1952,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2278,6 +2294,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2338,6 +2358,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2660,6 +2684,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2732,6 +2760,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3062,6 +3094,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3122,6 +3158,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3233,7 +3273,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3389,7 +3429,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3543,7 +3583,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3693,7 +3733,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4951,7 +4991,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = 
json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5107,7 +5147,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_asyncio.py index d8d8852e98..8e2ea5cdb3 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_asyncio.py @@ -1403,7 +1403,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1571,7 +1571,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1733,7 +1733,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1895,7 +1895,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3215,7 +3215,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: 
request_payload = None http_request = { @@ -3383,7 +3383,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3785,6 +3785,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3825,6 +3829,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -4151,6 +4159,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4211,6 +4223,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -4549,6 +4565,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + 
"uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4609,6 +4629,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -4931,6 +4955,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -5003,6 +5031,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -5333,6 +5365,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5393,6 +5429,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git 
a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_base.py index 135afb37c8..3751ad286d 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_base.py @@ -1436,6 +1436,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1476,6 +1480,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1821,6 +1829,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1881,6 +1893,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2238,6 +2254,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2298,6 +2318,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2639,6 +2663,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2711,6 +2739,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -3060,6 +3092,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3120,6 +3156,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py index 122de33a09..00efb7593d 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py +++ 
b/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py @@ -53,9 +53,9 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import FeatureOnlineStoreServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import FeatureOnlineStoreServiceGrpcAsyncIOTransport from .client import FeatureOnlineStoreServiceClient @@ -132,7 +132,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: FeatureOnlineStoreServiceAsyncClient: The constructed client. """ - return FeatureOnlineStoreServiceClient.from_service_account_info.__func__(FeatureOnlineStoreServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + FeatureOnlineStoreServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(FeatureOnlineStoreServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -148,7 +151,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: FeatureOnlineStoreServiceAsyncClient: The constructed client. 
""" - return FeatureOnlineStoreServiceClient.from_service_account_file.__func__(FeatureOnlineStoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + FeatureOnlineStoreServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + FeatureOnlineStoreServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py index dc1658b327..5aa62489c2 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py @@ -68,9 +68,9 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import FeatureOnlineStoreServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import FeatureOnlineStoreServiceGrpcTransport from .transports.grpc_asyncio import FeatureOnlineStoreServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/base.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/base.py index dfc348b556..71052c78ed 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/base.py @@ -87,8 +87,6 @@ def __init__( be used for 
service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -103,11 +101,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_base.py index 2595ed2c8b..3b9f2f22f8 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_base.py @@ -894,6 +894,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -934,6 +938,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1279,6 +1287,10 @@ def _get_http_options(): 
"method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1339,6 +1351,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1696,6 +1712,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1756,6 +1776,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2097,6 +2121,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2169,6 +2197,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2518,6 +2550,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + 
"uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2578,6 +2614,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py index c2fd1c0f47..65ef956dee 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.feature_registry_service import pagers from google.cloud.aiplatform_v1.types import feature from google.cloud.aiplatform_v1.types import feature as gca_feature @@ -59,9 +57,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: 
ignore from .transports.base import FeatureRegistryServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import FeatureRegistryServiceGrpcAsyncIOTransport from .client import FeatureRegistryServiceClient @@ -136,7 +136,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: FeatureRegistryServiceAsyncClient: The constructed client. """ - return FeatureRegistryServiceClient.from_service_account_info.__func__(FeatureRegistryServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + FeatureRegistryServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(FeatureRegistryServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -152,7 +155,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: FeatureRegistryServiceAsyncClient: The constructed client. """ - return FeatureRegistryServiceClient.from_service_account_file.__func__(FeatureRegistryServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + FeatureRegistryServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + FeatureRegistryServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/client.py b/google/cloud/aiplatform_v1/services/feature_registry_service/client.py index 732b9a2c5a..113ca24a8f 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/client.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.feature_registry_service import pagers from 
google.cloud.aiplatform_v1.types import feature from google.cloud.aiplatform_v1.types import feature as gca_feature @@ -75,9 +73,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import FeatureRegistryServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import FeatureRegistryServiceGrpcTransport from .transports.grpc_asyncio import FeatureRegistryServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/base.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/base.py index e011126828..5a3217c8c8 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/base.py @@ -91,8 +91,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -107,11 +105,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py index 685e3875ce..f02a9bf94f 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py @@ -1382,6 +1382,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1422,6 +1426,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1748,6 +1756,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1808,6 +1820,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2146,6 +2162,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2206,6 +2226,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2528,6 +2552,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2600,6 +2628,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2930,6 +2962,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2990,6 +3026,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3100,7 +3140,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3258,7 +3298,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3414,7 +3454,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3567,7 +3607,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3717,7 +3757,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = 
json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4489,7 +4529,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4645,7 +4685,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_asyncio.py index 3f0bdc8e3d..b029900a45 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_asyncio.py @@ -1260,7 +1260,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1426,7 +1426,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1590,7 +1590,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1751,7 +1751,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1909,7 
+1909,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2713,7 +2713,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2877,7 +2877,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3279,6 +3279,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3319,6 +3323,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -3645,6 +3653,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3705,6 +3717,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -4043,6 +4059,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4103,6 +4123,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -4425,6 +4449,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -4497,6 +4525,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -4827,6 +4859,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4887,6 +4923,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_base.py index 519ca715bd..8389f9ba83 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_base.py @@ -1302,6 +1302,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1342,6 +1346,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1687,6 +1695,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1747,6 +1759,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2104,6 +2120,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2164,6 +2184,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2505,6 +2529,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2577,6 +2605,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2926,6 +2958,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2986,6 +3022,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git 
a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py index 06211d9ea6..c3b462a6bf 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py @@ -135,7 +135,12 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: FeaturestoreOnlineServingServiceAsyncClient: The constructed client. """ - return FeaturestoreOnlineServingServiceClient.from_service_account_info.__func__(FeaturestoreOnlineServingServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + FeaturestoreOnlineServingServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func( + FeaturestoreOnlineServingServiceAsyncClient, info, *args, **kwargs + ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -151,7 +156,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: FeaturestoreOnlineServingServiceAsyncClient: The constructed client. 
""" - return FeaturestoreOnlineServingServiceClient.from_service_account_file.__func__(FeaturestoreOnlineServingServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + FeaturestoreOnlineServingServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + FeaturestoreOnlineServingServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py index cd8d16436f..3a28268832 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py @@ -87,8 +87,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -103,11 +101,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_base.py index c617dfeae9..7cb6b9593c 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_base.py @@ -909,6 +909,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -949,6 +953,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1294,6 +1302,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1354,6 +1366,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1711,6 +1727,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1771,6 +1791,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2112,6 +2136,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2184,6 +2212,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2533,6 +2565,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2593,6 +2629,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py index a35ba31d84..ed619bd379 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py +++ 
b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.featurestore_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import entity_type @@ -62,9 +60,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport from .client import FeaturestoreServiceClient @@ -141,7 +141,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: FeaturestoreServiceAsyncClient: The constructed client. 
""" - return FeaturestoreServiceClient.from_service_account_info.__func__(FeaturestoreServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + FeaturestoreServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(FeaturestoreServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -157,7 +160,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: FeaturestoreServiceAsyncClient: The constructed client. """ - return FeaturestoreServiceClient.from_service_account_file.__func__(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + FeaturestoreServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_service/client.py index 4d1f9103bb..dcf1e51c1b 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.featurestore_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import entity_type @@ -78,9 +76,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import 
timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import FeaturestoreServiceGrpcTransport from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py index 5b96eaaea8..c6c97f20ae 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py @@ -93,8 +93,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -109,11 +107,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py index 8e95a8724d..d3e6aa42de 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py @@ -1958,6 +1958,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1998,6 +2002,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2324,6 +2332,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2384,6 +2396,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2722,6 +2738,10 
@@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2782,6 +2802,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3104,6 +3128,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3176,6 +3204,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3506,6 +3538,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3566,6 +3602,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3676,7 +3716,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3834,7 +3874,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3992,7 +4032,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4148,7 +4188,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4304,7 +4344,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4457,7 +4497,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4607,7 +4647,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4757,7 +4797,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = 
type(request).to_json(request) except: request_payload = None http_request = { @@ -4914,7 +4954,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5072,7 +5112,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5689,7 +5729,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6780,7 +6820,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_asyncio.py index 9602b1526f..c609f94c78 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_asyncio.py @@ -1886,7 +1886,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2050,7 +2050,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2216,7 +2216,7 @@ async def __call__( ) method = transcoded_request["method"] try: - 
request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2382,7 +2382,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2546,7 +2546,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2705,7 +2705,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2865,7 +2865,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3023,7 +3023,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3186,7 +3186,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3350,7 +3350,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3997,7 +3997,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload 
= type(request).to_json(request) except: request_payload = None http_request = { @@ -5136,7 +5136,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5538,6 +5538,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5578,6 +5582,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -5904,6 +5912,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5964,6 +5976,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -6302,6 +6318,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -6362,6 +6382,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -6684,6 +6708,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -6756,6 +6784,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -7086,6 +7118,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -7146,6 +7182,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_base.py index e4f635773c..4228da1729 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_base.py @@ -1836,6 +1836,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1876,6 +1880,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -2221,6 +2229,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2281,6 +2293,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2638,6 +2654,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2698,6 +2718,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -3039,6 +3063,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3111,6 +3139,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -3460,6 +3492,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3520,6 +3556,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/async_client.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/async_client.py index 8dd9d772a7..19bebec217 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/async_client.py +++ 
b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/async_client.py @@ -56,9 +56,9 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import GenAiCacheServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import GenAiCacheServiceGrpcAsyncIOTransport from .client import GenAiCacheServiceClient @@ -129,7 +129,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: GenAiCacheServiceAsyncClient: The constructed client. """ - return GenAiCacheServiceClient.from_service_account_info.__func__(GenAiCacheServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + GenAiCacheServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(GenAiCacheServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -145,7 +148,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: GenAiCacheServiceAsyncClient: The constructed client. 
""" - return GenAiCacheServiceClient.from_service_account_file.__func__(GenAiCacheServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + GenAiCacheServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(GenAiCacheServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/client.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/client.py index 66f50c8052..8deb96e615 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/client.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/client.py @@ -72,9 +72,9 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import GenAiCacheServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import GenAiCacheServiceGrpcTransport from .transports.grpc_asyncio import GenAiCacheServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/base.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/base.py index bd50ff336f..5068713a23 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/base.py @@ -34,7 +34,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from 
google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ @@ -90,8 +90,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -106,11 +104,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc.py index a88322b11d..6b25fa58d3 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc.py @@ -37,7 +37,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import GenAiCacheServiceTransport, DEFAULT_CLIENT_INFO try: diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc_asyncio.py index f0ccb0c644..bc940698ae 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc_asyncio.py @@ -40,7 +40,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import GenAiCacheServiceTransport, DEFAULT_CLIENT_INFO from .grpc import GenAiCacheServiceGrpcTransport diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest.py index 9f6bccd99c..99a1195f05 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest.py @@ -39,7 +39,7 @@ from 
google.cloud.aiplatform_v1.types import cached_content from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content from google.cloud.aiplatform_v1.types import gen_ai_cache_service -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -915,7 +915,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_asyncio.py index 774842c79f..1dbc81be84 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_asyncio.py @@ -51,7 +51,7 @@ from google.cloud.aiplatform_v1.types import cached_content from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content from google.cloud.aiplatform_v1.types import gen_ai_cache_service -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -998,7 +998,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_base.py index 723bd93d46..6c840c183b 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_base.py +++ 
b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_base.py @@ -30,7 +30,7 @@ from google.cloud.aiplatform_v1.types import cached_content from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content from google.cloud.aiplatform_v1.types import gen_ai_cache_service -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -988,6 +988,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1028,6 +1032,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1373,6 +1381,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1433,6 +1445,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1790,6 +1806,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1850,6 +1870,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2191,6 +2215,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2263,6 +2291,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2612,6 +2644,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2672,6 +2708,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py index 9b5f7149bf..87262e71d0 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py +++ 
b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.gen_ai_tuning_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import genai_tuning_service @@ -57,8 +55,10 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import GenAiTuningServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import GenAiTuningServiceGrpcAsyncIOTransport from .client import GenAiTuningServiceClient @@ -135,7 +135,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: GenAiTuningServiceAsyncClient: The constructed client. """ - return GenAiTuningServiceClient.from_service_account_info.__func__(GenAiTuningServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + GenAiTuningServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(GenAiTuningServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -151,7 +154,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: GenAiTuningServiceAsyncClient: The constructed client. 
""" - return GenAiTuningServiceClient.from_service_account_file.__func__(GenAiTuningServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + GenAiTuningServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(GenAiTuningServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py index 7e7a8d9dd5..a7dba1e309 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.gen_ai_tuning_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import genai_tuning_service @@ -73,8 +71,10 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import GenAiTuningServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import GenAiTuningServiceGrpcTransport from .transports.grpc_asyncio import GenAiTuningServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py 
b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py index 029e02166c..901c5b262f 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py @@ -35,7 +35,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ @@ -91,8 +91,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -107,11 +105,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py index b9e44709cb..d27e9ee313 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py @@ -38,7 +38,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import GenAiTuningServiceTransport, DEFAULT_CLIENT_INFO try: diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py index 1fb040f1f6..7b27f6ab7a 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py @@ -41,7 +41,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import GenAiTuningServiceTransport, DEFAULT_CLIENT_INFO from .grpc import GenAiTuningServiceGrpcTransport diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py index 97eddac109..0c4e4a836b 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py @@ -40,7 
+40,7 @@ from google.cloud.aiplatform_v1.types import genai_tuning_service from google.cloud.aiplatform_v1.types import tuning_job from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -1001,6 +1001,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1041,6 +1045,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1367,6 +1375,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1427,6 +1439,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1765,6 +1781,10 @@ def operations_client(self) 
-> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1825,6 +1845,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2147,6 +2171,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2219,6 +2247,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2549,6 +2581,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2609,6 +2645,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2710,7 +2750,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3293,7 +3333,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_asyncio.py index 0a02725785..5d64547d39 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_asyncio.py @@ -53,7 +53,7 @@ from google.cloud.aiplatform_v1.types import genai_tuning_service from google.cloud.aiplatform_v1.types import tuning_job from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -838,7 +838,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1449,7 +1449,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1851,6 +1851,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1891,6 +1895,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2217,6 +2225,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2277,6 +2289,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2615,6 +2631,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2675,6 +2695,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2997,6 +3021,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3069,6 +3097,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3399,6 +3431,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3459,6 +3495,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_base.py index 167712cb6f..77a742039b 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_base.py +++ 
b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_base.py @@ -30,7 +30,7 @@ from google.cloud.aiplatform_v1.types import genai_tuning_service from google.cloud.aiplatform_v1.types import tuning_job from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -996,6 +996,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1036,6 +1040,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1381,6 +1389,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1441,6 +1453,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1798,6 +1814,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1858,6 +1878,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2199,6 +2223,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2271,6 +2299,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2620,6 +2652,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2680,6 +2716,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py index ac47a2e3ae..18ac885345 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py +++ 
b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import index_endpoint @@ -58,9 +56,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport from .client import IndexEndpointServiceClient @@ -135,7 +135,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: IndexEndpointServiceAsyncClient: The constructed client. 
""" - return IndexEndpointServiceClient.from_service_account_info.__func__(IndexEndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + IndexEndpointServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(IndexEndpointServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -151,7 +154,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: IndexEndpointServiceAsyncClient: The constructed client. """ - return IndexEndpointServiceClient.from_service_account_file.__func__(IndexEndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + IndexEndpointServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(IndexEndpointServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py index eee8a381c8..b67a668a9f 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import index_endpoint @@ -74,9 +72,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from 
google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import IndexEndpointServiceGrpcTransport from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py index df060fc90e..02a248601e 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py @@ -90,8 +90,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -106,11 +104,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py index afbda0a6df..971b0614c4 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py @@ -1211,6 +1211,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1251,6 +1255,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1577,6 +1585,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1637,6 +1649,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1975,6 
+1991,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2035,6 +2055,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2357,6 +2381,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2429,6 +2457,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2759,6 +2791,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2819,6 +2855,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2927,7 +2967,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3080,7 +3120,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3235,7 +3275,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3702,7 +3742,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3858,7 +3898,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_asyncio.py index de9cb8d2ad..015ca51aa4 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_asyncio.py @@ -1070,7 +1070,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None 
http_request = { @@ -1229,7 +1229,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1392,7 +1392,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1877,7 +1877,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2041,7 +2041,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2609,6 +2609,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2649,6 +2653,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2975,6 +2983,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3035,6 +3047,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -3373,6 +3389,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3433,6 +3453,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3755,6 +3779,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3827,6 +3855,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -4157,6 +4189,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4217,6 +4253,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_base.py index 5a8c949ec6..1104824ad0 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_base.py @@ -1158,6 +1158,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1198,6 +1202,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1543,6 +1551,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": 
"delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1603,6 +1615,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1960,6 +1976,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2020,6 +2040,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2361,6 +2385,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2433,6 +2461,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2782,6 +2814,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2842,6 +2878,10 @@ def 
_get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/index_service/async_client.py b/google/cloud/aiplatform_v1/services/index_service/async_client.py index 16ac0bd21f..287ded362b 100644 --- a/google/cloud/aiplatform_v1/services/index_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/index_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.index_service import pagers from google.cloud.aiplatform_v1.types import deployed_index_ref from google.cloud.aiplatform_v1.types import encryption_spec @@ -58,10 +56,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import 
IndexServiceGrpcAsyncIOTransport from .client import IndexServiceClient @@ -130,7 +130,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: IndexServiceAsyncClient: The constructed client. """ - return IndexServiceClient.from_service_account_info.__func__(IndexServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + IndexServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(IndexServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -146,7 +149,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: IndexServiceAsyncClient: The constructed client. """ - return IndexServiceClient.from_service_account_file.__func__(IndexServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + IndexServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(IndexServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/index_service/client.py b/google/cloud/aiplatform_v1/services/index_service/client.py index 615e55f164..09b22de5d9 100644 --- a/google/cloud/aiplatform_v1/services/index_service/client.py +++ b/google/cloud/aiplatform_v1/services/index_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.index_service import pagers from google.cloud.aiplatform_v1.types import deployed_index_ref from google.cloud.aiplatform_v1.types import encryption_spec @@ -74,10 +72,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import 
empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import IndexServiceGrpcTransport from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/base.py b/google/cloud/aiplatform_v1/services/index_service/transports/base.py index 314ea5b852..5dd9cd7775 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/base.py @@ -89,8 +89,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -105,11 +103,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/rest.py b/google/cloud/aiplatform_v1/services/index_service/transports/rest.py index 70bc71c92f..aac621ff0f 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/rest.py @@ -1142,6 +1142,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1182,6 +1186,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1508,6 +1516,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1568,6 +1580,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1906,6 +1922,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1966,6 +1986,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2288,6 +2312,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2360,6 +2388,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2690,6 +2722,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2750,6 +2786,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2861,7 +2901,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3013,7 +3053,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3635,7 +3675,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/index_service/transports/rest_asyncio.py index 79eff3f107..aa82cc32a7 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/rest_asyncio.py @@ -1001,7 +1001,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1163,7 +1163,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1819,7 +1819,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2387,6 
+2387,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2427,6 +2431,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2753,6 +2761,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2813,6 +2825,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -3151,6 +3167,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3211,6 +3231,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3533,6 +3557,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3605,6 +3633,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3935,6 +3967,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3995,6 +4031,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/index_service/transports/rest_base.py index 22907d7de1..30d09701db 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/rest_base.py @@ 
-1098,6 +1098,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1138,6 +1142,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1483,6 +1491,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1543,6 +1555,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1900,6 +1916,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1960,6 +1980,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2301,6 +2325,10 @@ def _get_http_options(): "method": "get", 
"uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2373,6 +2401,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2722,6 +2754,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2782,6 +2818,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py index 7925344a65..7692ce2451 100644 --- a/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.job_service import pagers from google.cloud.aiplatform_v1.types import batch_prediction_job from google.cloud.aiplatform_v1.types import ( @@ -82,13 +80,15 @@ from google.iam.v1 import 
iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore +import google.type.money_pb2 as money_pb2 # type: ignore from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport from .client import JobServiceClient @@ -203,7 +203,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: JobServiceAsyncClient: The constructed client. """ - return JobServiceClient.from_service_account_info.__func__(JobServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + JobServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(JobServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -219,7 +222,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: JobServiceAsyncClient: The constructed client. 
""" - return JobServiceClient.from_service_account_file.__func__(JobServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + JobServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(JobServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py index 4f571d0735..da26b8089b 100644 --- a/google/cloud/aiplatform_v1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1/services/job_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.job_service import pagers from google.cloud.aiplatform_v1.types import batch_prediction_job from google.cloud.aiplatform_v1.types import ( @@ -98,13 +96,15 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import 
google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore +import google.type.money_pb2 as money_pb2 # type: ignore from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1/services/job_service/transports/base.py index d83b39ee67..bafd938625 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/base.py @@ -51,7 +51,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ @@ -110,8 +110,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -126,11 +124,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py index c94adeb1e0..2149526426 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py @@ -54,7 +54,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import JobServiceTransport, DEFAULT_CLIENT_INFO try: diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py index 745ff1261a..8d19d6ac71 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py @@ -57,7 +57,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import JobServiceTransport, DEFAULT_CLIENT_INFO from .grpc import JobServiceGrpcTransport diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/rest.py b/google/cloud/aiplatform_v1/services/job_service/transports/rest.py index d871ff127f..02929c3118 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/rest.py @@ -56,7 +56,7 @@ ) from google.cloud.aiplatform_v1.types import nas_job from google.cloud.aiplatform_v1.types import nas_job as gca_nas_job -from 
google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -2517,6 +2517,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2557,6 +2561,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2883,6 +2891,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2943,6 +2955,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -3281,6 +3297,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3341,6 +3361,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3663,6 +3687,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3735,6 +3763,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -4065,6 +4097,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4125,6 +4161,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -4226,7 +4266,7 @@ def 
__call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4342,7 +4382,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4458,7 +4498,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4576,7 +4616,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4698,7 +4738,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5801,7 +5841,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5950,7 +5990,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6099,7 +6139,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6250,7 +6290,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + 
request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6407,7 +6447,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6560,7 +6600,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -8890,7 +8930,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -9010,7 +9050,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -9309,7 +9349,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/job_service/transports/rest_asyncio.py index 78efe15f0b..0cc6a79e45 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/rest_asyncio.py @@ -69,7 +69,7 @@ ) from google.cloud.aiplatform_v1.types import nas_job from google.cloud.aiplatform_v1.types import nas_job as gca_nas_job -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -2507,7 +2507,7 @@ async def __call__( ) 
method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2629,7 +2629,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2753,7 +2753,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2881,7 +2881,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3009,7 +3009,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4170,7 +4170,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4329,7 +4329,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4488,7 +4488,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4653,7 +4653,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = 
json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4818,7 +4818,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4983,7 +4983,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -7437,7 +7437,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -7565,7 +7565,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -7878,7 +7878,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -8284,6 +8284,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -8324,6 +8328,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -8650,6 +8658,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -8710,6 +8722,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -9048,6 +9064,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -9108,6 +9128,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -9430,6 +9454,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -9502,6 +9530,10 @@ def 
operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -9832,6 +9864,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -9892,6 +9928,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/job_service/transports/rest_base.py index fe67301f48..c7a8bddf89 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/rest_base.py @@ -46,7 +46,7 @@ ) from google.cloud.aiplatform_v1.types import nas_job from google.cloud.aiplatform_v1.types import nas_job as gca_nas_job -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -2560,6 +2560,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2600,6 +2604,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -2945,6 +2953,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3005,6 +3017,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -3362,6 +3378,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3422,6 +3442,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -3763,6 +3787,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3835,6 +3863,10 @@ def 
_get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -4184,6 +4216,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4244,6 +4280,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py b/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py index 73fe6e03eb..eda2eca20f 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py @@ -52,7 +52,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore from .transports.base import LlmUtilityServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import LlmUtilityServiceGrpcAsyncIOTransport from .client import LlmUtilityServiceClient @@ -121,7 +121,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: LlmUtilityServiceAsyncClient: The constructed client. 
""" - return LlmUtilityServiceClient.from_service_account_info.__func__(LlmUtilityServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + LlmUtilityServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(LlmUtilityServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -137,7 +140,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: LlmUtilityServiceAsyncClient: The constructed client. """ - return LlmUtilityServiceClient.from_service_account_file.__func__(LlmUtilityServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + LlmUtilityServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(LlmUtilityServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/client.py b/google/cloud/aiplatform_v1/services/llm_utility_service/client.py index cfad479023..19c9bd9c4e 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/client.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/client.py @@ -68,7 +68,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore from .transports.base import LlmUtilityServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import LlmUtilityServiceGrpcTransport from .transports.grpc_asyncio import LlmUtilityServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/base.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/base.py index 8877c0d32e..c89f5d4401 100644 --- 
a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/base.py @@ -88,8 +88,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -104,11 +102,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest_base.py index 6da50202c5..465640bcae 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest_base.py @@ -873,6 +873,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -913,6 +917,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1258,6 +1266,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1318,6 +1330,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1675,6 +1691,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", 
+ }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1735,6 +1755,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2076,6 +2100,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2148,6 +2176,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2497,6 +2529,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2557,6 +2593,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/match_service/async_client.py b/google/cloud/aiplatform_v1/services/match_service/async_client.py index 090175ebc5..e001ef9a8a 100644 --- a/google/cloud/aiplatform_v1/services/match_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/match_service/async_client.py @@ -117,7 +117,10 @@ def from_service_account_info(cls, 
info: dict, *args, **kwargs): Returns: MatchServiceAsyncClient: The constructed client. """ - return MatchServiceClient.from_service_account_info.__func__(MatchServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + MatchServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(MatchServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -133,7 +136,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: MatchServiceAsyncClient: The constructed client. """ - return MatchServiceClient.from_service_account_file.__func__(MatchServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + MatchServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(MatchServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/base.py b/google/cloud/aiplatform_v1/services/match_service/transports/base.py index 8b8fa430a8..189946f8be 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/base.py @@ -87,8 +87,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -103,11 +101,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/match_service/transports/rest_base.py index 337407c9be..01b69f1385 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/rest_base.py @@ -842,6 +842,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -882,6 +886,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1227,6 +1235,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1287,6 +1299,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1644,6 +1660,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1704,6 +1724,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2045,6 +2069,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2117,6 +2145,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2466,6 +2498,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2526,6 +2562,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py index df01b930c4..2a26209097 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.metadata_service import pagers from google.cloud.aiplatform_v1.types import artifact from google.cloud.aiplatform_v1.types import artifact as gca_artifact @@ -67,10 +65,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import 
MetadataServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport from .client import MetadataServiceClient @@ -149,7 +149,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: MetadataServiceAsyncClient: The constructed client. """ - return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + MetadataServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(MetadataServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -165,7 +168,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: MetadataServiceAsyncClient: The constructed client. """ - return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + MetadataServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(MetadataServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/metadata_service/client.py b/google/cloud/aiplatform_v1/services/metadata_service/client.py index 349082c75b..182efc1aa2 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/client.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.metadata_service import pagers from google.cloud.aiplatform_v1.types import artifact from google.cloud.aiplatform_v1.types import artifact as gca_artifact @@ -83,10 +81,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from 
google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import MetadataServiceGrpcTransport from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py index 62e6931cdb..d481046adb 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py @@ -98,8 +98,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -114,11 +112,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py index 415d5bb318..8be13d5517 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py @@ -2586,6 +2586,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2626,6 +2630,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2952,6 +2960,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", 
"uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3012,6 +3024,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -3350,6 +3366,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3410,6 +3430,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3732,6 +3756,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3804,6 +3832,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -4134,6 +4166,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4194,6 +4230,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -5399,7 +5439,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5547,7 +5587,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5694,7 +5734,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5843,7 +5883,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5993,7 +6033,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: 
request_payload = None http_request = { @@ -7639,7 +7679,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -7792,7 +7832,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -7947,7 +7987,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_asyncio.py index 7c84da6537..32b85ae2a4 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_asyncio.py @@ -3727,7 +3727,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3886,7 +3886,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4046,7 +4046,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4206,7 +4206,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + 
request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4366,7 +4366,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6114,7 +6114,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6280,7 +6280,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6446,7 +6446,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -8005,6 +8005,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -8045,6 +8049,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -8371,6 +8379,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -8431,6 +8443,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -8769,6 +8785,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -8829,6 +8849,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -9151,6 +9175,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -9223,6 +9251,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, 
], "google.longrunning.Operations.WaitOperation": [ { @@ -9553,6 +9585,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -9613,6 +9649,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_base.py index c796c585c5..43fccf6373 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_base.py @@ -2398,6 +2398,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2438,6 +2442,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -2783,6 +2791,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2843,6 +2855,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -3200,6 +3216,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3260,6 +3280,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -3601,6 +3625,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3673,6 +3701,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -4022,6 +4054,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4082,6 +4118,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1/services/migration_service/async_client.py index c65632e548..efddaa0c39 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.migration_service import pagers from google.cloud.aiplatform_v1.types import migratable_resource from google.cloud.aiplatform_v1.types import migration_service @@ -54,6 +52,8 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport from .client import MigrationServiceClient @@ -136,7 +136,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: MigrationServiceAsyncClient: The constructed client. 
""" - return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + MigrationServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(MigrationServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -152,7 +155,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: MigrationServiceAsyncClient: The constructed client. """ - return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + MigrationServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(MigrationServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index e626e873a0..5d14719a74 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.migration_service import pagers from google.cloud.aiplatform_v1.types import migratable_resource from google.cloud.aiplatform_v1.types import migration_service @@ -70,6 +68,8 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore from .transports.base import MigrationServiceTransport, 
DEFAULT_CLIENT_INFO from .transports.grpc import MigrationServiceGrpcTransport from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport @@ -271,18 +271,23 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod @@ -310,23 +315,18 @@ def parse_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py index fe4ef89772..3e22c2cce3 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py @@ -88,8 +88,6 @@ def __init__( be 
used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -104,11 +102,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py b/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py index 449e145eb9..0e57f2a19d 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py @@ -866,6 +866,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -906,6 +910,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], 
"google.longrunning.Operations.DeleteOperation": [ { @@ -1232,6 +1240,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1292,6 +1304,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1630,6 +1646,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1690,6 +1710,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2012,6 +2036,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2084,6 +2112,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2414,6 +2446,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2474,6 +2510,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2582,7 +2622,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/migration_service/transports/rest_asyncio.py index 998dba0ab2..f5ccc79485 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/rest_asyncio.py @@ -695,7 +695,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1269,6 +1269,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1309,6 +1313,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1635,6 +1643,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1695,6 +1707,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2033,6 +2049,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2093,6 +2113,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + 
"uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2415,6 +2439,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2487,6 +2515,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2817,6 +2849,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2877,6 +2913,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/migration_service/transports/rest_base.py index bb4c9c153c..bb363db174 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/rest_base.py @@ -842,6 +842,10 @@ def _get_http_options(): "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -882,6 +886,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1227,6 +1235,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1287,6 +1299,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1644,6 +1660,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1704,6 +1724,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2045,6 +2069,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, 
+ { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2117,6 +2145,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2466,6 +2498,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2526,6 +2562,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py b/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py index bdee217361..b41cf9695e 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import model_garden_service from google.cloud.aiplatform_v1.types import publisher_model @@ -54,6 +52,8 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import 
policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore from .transports.base import ModelGardenServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import ModelGardenServiceGrpcAsyncIOTransport from .client import ModelGardenServiceClient @@ -130,7 +130,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: ModelGardenServiceAsyncClient: The constructed client. """ - return ModelGardenServiceClient.from_service_account_info.__func__(ModelGardenServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + ModelGardenServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(ModelGardenServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -146,7 +149,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: ModelGardenServiceAsyncClient: The constructed client. 
""" - return ModelGardenServiceClient.from_service_account_file.__func__(ModelGardenServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + ModelGardenServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(ModelGardenServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/client.py b/google/cloud/aiplatform_v1/services/model_garden_service/client.py index a73e492398..65e8390682 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/client.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import model_garden_service from google.cloud.aiplatform_v1.types import publisher_model @@ -70,6 +68,8 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore from .transports.base import ModelGardenServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import ModelGardenServiceGrpcTransport from .transports.grpc_asyncio import ModelGardenServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/base.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/base.py index 65f3b3e3e8..7650417275 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/base.py @@ -89,8 +89,6 @@ def __init__( 
be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -105,11 +103,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py index 2abde21882..0480590a29 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py @@ -862,6 +862,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -902,6 +906,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], 
"google.longrunning.Operations.DeleteOperation": [ { @@ -1228,6 +1236,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1288,6 +1300,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1626,6 +1642,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1686,6 +1706,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2008,6 +2032,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2080,6 +2108,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2410,6 +2442,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2470,6 +2506,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2579,7 +2619,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_asyncio.py index cf2802589e..8358eb9ca3 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_asyncio.py @@ -693,7 +693,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1250,6 +1250,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1290,6 +1294,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1616,6 +1624,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1676,6 +1688,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2014,6 +2030,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2074,6 +2094,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + 
"uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2396,6 +2420,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2468,6 +2496,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2798,6 +2830,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2858,6 +2894,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_base.py index cf3ee369f9..fe184ebd17 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_base.py @@ -833,6 +833,10 @@ def _get_http_options(): "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -873,6 +877,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1218,6 +1226,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1278,6 +1290,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1635,6 +1651,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1695,6 +1715,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2036,6 +2060,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, 
+ { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2108,6 +2136,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2457,6 +2489,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2517,6 +2553,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py index ff619028f0..51d7a27d33 100644 --- a/google/cloud/aiplatform_v1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.model_service import pagers from google.cloud.aiplatform_v1.types import deployed_model_ref from google.cloud.aiplatform_v1.types import encryption_spec @@ -63,10 +61,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import 
policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport from .client import ModelServiceClient @@ -147,7 +147,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: ModelServiceAsyncClient: The constructed client. """ - return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + ModelServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(ModelServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -163,7 +166,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: ModelServiceAsyncClient: The constructed client. 
""" - return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + ModelServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(ModelServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/model_service/client.py b/google/cloud/aiplatform_v1/services/model_service/client.py index d04b2a5d7b..92a1184722 100644 --- a/google/cloud/aiplatform_v1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1/services/model_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.model_service import pagers from google.cloud.aiplatform_v1.types import deployed_model_ref from google.cloud.aiplatform_v1.types import encryption_spec @@ -79,10 +77,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import ModelServiceGrpcTransport from 
.transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1/services/model_service/transports/base.py index aa84cd1f14..df84dcbee4 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/base.py @@ -93,8 +93,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -109,11 +107,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/rest.py b/google/cloud/aiplatform_v1/services/model_service/transports/rest.py index 6818422422..4db4a0bcf2 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/rest.py @@ -1838,6 +1838,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1878,6 +1882,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2204,6 +2212,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2264,6 +2276,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2602,6 +2618,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2662,6 +2682,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2984,6 +3008,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3056,6 +3084,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3386,6 +3418,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3446,6 +3482,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3887,7 +3927,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4039,7 +4079,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4188,7 +4228,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4346,7 +4386,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6051,7 +6091,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6370,7 +6410,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/model_service/transports/rest_asyncio.py index 64604b4df7..7725bf460b 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/rest_asyncio.py @@ -2105,7 +2105,7 @@ async 
def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2267,7 +2267,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2425,7 +2425,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2595,7 +2595,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4388,7 +4388,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4725,7 +4725,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5127,6 +5127,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5167,6 +5171,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -5493,6 +5501,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5553,6 +5565,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -5891,6 +5907,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5951,6 +5971,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -6273,6 +6297,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -6345,6 +6373,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -6675,6 +6707,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6735,6 +6771,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/model_service/transports/rest_base.py index e6afd8dc1a..5bc633e237 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/rest_base.py @@ -1722,6 +1722,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1762,6 +1766,10 @@ def _get_http_options(): "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -2107,6 +2115,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2167,6 +2179,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2524,6 +2540,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2584,6 +2604,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2925,6 +2949,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2997,6 +3025,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + 
"method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -3346,6 +3378,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3406,6 +3442,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/notebook_service/async_client.py b/google/cloud/aiplatform_v1/services/notebook_service/async_client.py index 270a0fef05..d165cc15b0 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.notebook_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import job_state @@ -68,11 +66,13 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # 
type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import NotebookServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import NotebookServiceGrpcAsyncIOTransport from .client import NotebookServiceClient @@ -163,7 +163,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: NotebookServiceAsyncClient: The constructed client. """ - return NotebookServiceClient.from_service_account_info.__func__(NotebookServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + NotebookServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(NotebookServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -179,7 +182,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: NotebookServiceAsyncClient: The constructed client. 
""" - return NotebookServiceClient.from_service_account_file.__func__(NotebookServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + NotebookServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(NotebookServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/notebook_service/client.py b/google/cloud/aiplatform_v1/services/notebook_service/client.py index 29444def29..3ea2d02333 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/client.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.notebook_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import job_state @@ -84,11 +82,13 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore 
from .transports.base import NotebookServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import NotebookServiceGrpcTransport from .transports.grpc_asyncio import NotebookServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py index 37dc9d3342..2816915af9 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py @@ -90,8 +90,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -106,11 +104,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py index c3cf1353a3..33befc8bea 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py @@ -1683,6 +1683,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1723,6 +1727,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2049,6 +2057,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2109,6 +2121,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2447,6 +2463,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2507,6 +2527,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2829,6 +2853,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2901,6 +2929,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3231,6 +3263,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3291,6 +3327,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3399,7 +3439,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3558,7 +3598,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3719,7 +3759,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3873,7 +3913,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4027,7 +4067,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4180,7 +4220,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5281,7 +5321,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5437,7 +5477,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = 
type(request).to_json(request) except: request_payload = None http_request = { @@ -5758,7 +5798,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_asyncio.py index 9b81e30eaf..367d984740 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_asyncio.py @@ -1582,7 +1582,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1751,7 +1751,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1922,7 +1922,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2086,7 +2086,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2246,7 +2246,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2411,7 +2411,7 @@ async def __call__( ) method = transcoded_request["method"] try: - 
request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3562,7 +3562,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3726,7 +3726,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4065,7 +4065,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4469,6 +4469,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4509,6 +4513,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -4835,6 +4843,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4895,6 +4907,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -5233,6 +5249,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5293,6 +5313,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -5615,6 +5639,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -5687,6 +5715,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -6017,6 +6049,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6077,6 +6113,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_base.py index 354560bd40..dfc8c5b32d 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_base.py @@ -1562,6 +1562,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1602,6 +1606,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1947,6 +1955,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ 
-2007,6 +2019,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2364,6 +2380,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2424,6 +2444,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2765,6 +2789,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2837,6 +2865,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -3186,6 +3218,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3246,6 +3282,10 @@ def _get_http_options(): "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py index bb0ea15a2f..f819872dff 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.persistent_resource_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -60,10 +58,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import PersistentResourceServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio 
import PersistentResourceServiceGrpcAsyncIOTransport from .client import PersistentResourceServiceClient @@ -158,7 +158,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: PersistentResourceServiceAsyncClient: The constructed client. """ - return PersistentResourceServiceClient.from_service_account_info.__func__(PersistentResourceServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + PersistentResourceServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(PersistentResourceServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -174,7 +177,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: PersistentResourceServiceAsyncClient: The constructed client. """ - return PersistentResourceServiceClient.from_service_account_file.__func__(PersistentResourceServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + PersistentResourceServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + PersistentResourceServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py index 38a057184e..7e3bd46feb 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.persistent_resource_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types 
import operation as gca_operation @@ -76,10 +74,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import PersistentResourceServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import PersistentResourceServiceGrpcTransport from .transports.grpc_asyncio import PersistentResourceServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py index 1a22146607..7d6abdf7ed 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py @@ -89,8 +89,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -105,11 +103,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py index 0218dd77b8..0790b31798 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py @@ -1099,6 +1099,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1139,6 +1143,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1465,6 +1473,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1525,6 +1537,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1863,6 +1879,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1923,6 +1943,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2245,6 +2269,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2317,6 +2345,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2647,6 +2679,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2707,6 +2743,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2818,7 +2858,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2972,7 +3012,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3443,7 +3483,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3602,7 +3642,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_asyncio.py 
b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_asyncio.py index 03627cfd7e..76f51141ea 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_asyncio.py @@ -953,7 +953,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1117,7 +1117,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1616,7 +1616,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1785,7 +1785,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2189,6 +2189,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2229,6 +2233,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2555,6 +2563,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2615,6 +2627,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2953,6 +2969,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3013,6 +3033,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3335,6 +3359,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3407,6 +3435,10 @@ def 
operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3737,6 +3769,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3797,6 +3833,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_base.py index ef64b1e477..a51b25bcde 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_base.py @@ -1057,6 +1057,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1097,6 +1101,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + 
"uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1442,6 +1450,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1502,6 +1514,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1859,6 +1875,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1919,6 +1939,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2260,6 +2284,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2332,6 +2360,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2681,6 +2713,10 @@ def 
_get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2741,6 +2777,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py index 48bc855dc4..8029eff7bc 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.pipeline_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import model @@ -62,10 +60,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # 
type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport from .client import PipelineServiceClient @@ -162,7 +162,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: PipelineServiceAsyncClient: The constructed client. """ - return PipelineServiceClient.from_service_account_info.__func__(PipelineServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + PipelineServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(PipelineServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -178,7 +181,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: PipelineServiceAsyncClient: The constructed client. 
""" - return PipelineServiceClient.from_service_account_file.__func__(PipelineServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + PipelineServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(PipelineServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py index 7701c0f0ce..627654278f 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.pipeline_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import model @@ -78,10 +76,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import PipelineServiceGrpcTransport from 
.transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py index fb56b71b9f..07c8cea588 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py @@ -37,7 +37,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ @@ -93,8 +93,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -109,11 +107,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py index 52b3590290..f550576884 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py @@ -40,7 +40,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO try: diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py index ba6abaad55..220b305912 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py @@ -43,7 +43,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO from .grpc import PipelineServiceGrpcTransport diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py index a4807da723..744f8720ae 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py @@ -42,7 +42,7 @@ from google.cloud.aiplatform_v1.types import pipeline_service 
from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -1373,6 +1373,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1413,6 +1417,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1739,6 +1747,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1799,6 +1811,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2137,6 +2153,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": 
"get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2197,6 +2217,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2519,6 +2543,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2591,6 +2619,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2921,6 +2953,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2981,6 +3017,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + 
"method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3090,7 +3130,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3249,7 +3289,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3400,7 +3440,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3517,7 +3557,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3960,7 +4000,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4110,7 +4150,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_asyncio.py index aef88c7cff..be972600d2 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_asyncio.py @@ -55,7 +55,7 @@ from google.cloud.aiplatform_v1.types import pipeline_service from 
google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -1253,7 +1253,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1420,7 +1420,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1579,7 +1579,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1702,7 +1702,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2161,7 +2161,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2319,7 +2319,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3366,6 +3366,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3406,6 +3410,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -3732,6 +3740,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3792,6 +3804,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -4130,6 +4146,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4190,6 +4210,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], 
"google.longrunning.Operations.ListOperations": [ { @@ -4512,6 +4536,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -4584,6 +4612,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -4914,6 +4946,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4974,6 +5010,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_base.py index 5c9d04114e..9df3aa4873 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_base.py @@ -32,7 +32,7 @@ from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import training_pipeline from 
google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -1357,6 +1357,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1397,6 +1401,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1742,6 +1750,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1802,6 +1814,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2159,6 +2175,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2219,6 +2239,10 @@ def _get_http_options(): "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2560,6 +2584,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2632,6 +2660,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2981,6 +3013,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3041,6 +3077,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py index 222909f14b..c6bda4375f 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -48,7 +48,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api import httpbody_pb2 # type: ignore from 
google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import content as gca_content from google.cloud.aiplatform_v1.types import explanation @@ -59,9 +58,10 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore +import google.protobuf.any_pb2 as any_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport from .client import PredictionServiceClient @@ -138,7 +138,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: PredictionServiceAsyncClient: The constructed client. """ - return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + PredictionServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(PredictionServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -154,7 +157,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: PredictionServiceAsyncClient: The constructed client. 
""" - return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + PredictionServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(PredictionServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py index 367386e7dc..c78d5cc33d 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py @@ -63,7 +63,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import content as gca_content from google.cloud.aiplatform_v1.types import explanation @@ -74,9 +73,10 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore +import google.protobuf.any_pb2 as any_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import PredictionServiceGrpcTransport from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py index 
b7e61ae977..fc440cfa44 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py @@ -27,12 +27,12 @@ from google.oauth2 import service_account # type: ignore import google.protobuf -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import prediction_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ @@ -91,8 +91,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -107,11 +105,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py index 0857a8e2e1..cb8051d284 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py @@ -30,12 +30,12 @@ import grpc # type: ignore import proto # type: ignore -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import prediction_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO try: diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py index 81ed9c6e8a..1d75fb3093 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py @@ -33,12 +33,12 @@ import proto # type: ignore from grpc.experimental import aio # type: ignore -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import prediction_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO from .grpc import PredictionServiceGrpcTransport diff --git 
a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py index f1031919b7..30bce45186 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py @@ -36,8 +36,8 @@ import warnings -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import prediction_service +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -2088,7 +2088,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2681,7 +2681,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest_asyncio.py index 043f3368ba..1aea553507 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest_asyncio.py @@ -48,8 +48,8 @@ from typing import Any, Dict, List, Callable, Tuple, Optional, Sequence, Union -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import prediction_service +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -2263,7 +2263,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = 
type(request).to_json(request) except: request_payload = None http_request = { @@ -2873,7 +2873,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest_base.py index 24399ee420..1f29f74687 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest_base.py @@ -27,8 +27,8 @@ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import prediction_service +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -1350,6 +1350,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1390,6 +1394,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1735,6 +1743,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": 
"delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1795,6 +1807,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2152,6 +2168,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2212,6 +2232,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2553,6 +2577,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2625,6 +2653,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2974,6 +3006,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3034,6 +3070,10 @@ def 
_get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/async_client.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/async_client.py index 4d36c2d70d..8323e920f9 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/async_client.py @@ -47,14 +47,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore +import google.protobuf.any_pb2 as any_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore from .transports.base import ( ReasoningEngineExecutionServiceTransport, DEFAULT_CLIENT_INFO, @@ -136,7 +136,12 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: ReasoningEngineExecutionServiceAsyncClient: The constructed client. 
""" - return ReasoningEngineExecutionServiceClient.from_service_account_info.__func__(ReasoningEngineExecutionServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + ReasoningEngineExecutionServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func( + ReasoningEngineExecutionServiceAsyncClient, info, *args, **kwargs + ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -152,7 +157,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: ReasoningEngineExecutionServiceAsyncClient: The constructed client. """ - return ReasoningEngineExecutionServiceClient.from_service_account_file.__func__(ReasoningEngineExecutionServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + ReasoningEngineExecutionServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + ReasoningEngineExecutionServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/client.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/client.py index fd7b86d7cf..cb3e3883b5 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/client.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/client.py @@ -62,14 +62,14 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import 
google.api.httpbody_pb2 as httpbody_pb2 # type: ignore +import google.protobuf.any_pb2 as any_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore from .transports.base import ( ReasoningEngineExecutionServiceTransport, DEFAULT_CLIENT_INFO, diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/base.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/base.py index 09a6cfa7aa..a428817640 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/base.py @@ -27,12 +27,12 @@ from google.oauth2 import service_account # type: ignore import google.protobuf -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ @@ -88,8 +88,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -104,11 +102,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc.py index b94153e5bd..2f584f47d4 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc.py @@ -30,12 +30,12 @@ import grpc # type: ignore import proto # type: ignore -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore from .base import ReasoningEngineExecutionServiceTransport, DEFAULT_CLIENT_INFO try: diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py index 223649c529..9368b9a7ac 100644 --- 
a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py @@ -33,12 +33,12 @@ import proto # type: ignore from grpc.experimental import aio # type: ignore -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore from .base import ReasoningEngineExecutionServiceTransport, DEFAULT_CLIENT_INFO from .grpc import ReasoningEngineExecutionServiceGrpcTransport diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest.py index fbdf192a45..f5277110eb 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest.py @@ -36,8 +36,8 @@ import warnings -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -845,7 +845,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_asyncio.py 
b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_asyncio.py index f488bb6e2e..b5494db031 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_asyncio.py @@ -48,8 +48,8 @@ from typing import Any, Dict, List, Callable, Tuple, Optional, Sequence, Union -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -919,7 +919,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_base.py index 135bdf8a2b..0942df0627 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_base.py @@ -27,8 +27,8 @@ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -853,6 +853,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -893,6 +897,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1238,6 +1246,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1298,6 +1310,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1655,6 +1671,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1715,6 +1735,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2056,6 +2080,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + 
}, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2128,6 +2156,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2477,6 +2509,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2537,6 +2573,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_service/async_client.py b/google/cloud/aiplatform_v1/services/reasoning_engine_service/async_client.py index d0382eba25..82917f0d20 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.reasoning_engine_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -57,9 +55,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning 
import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import ReasoningEngineServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import ReasoningEngineServiceGrpcAsyncIOTransport from .client import ReasoningEngineServiceClient @@ -144,7 +144,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: ReasoningEngineServiceAsyncClient: The constructed client. """ - return ReasoningEngineServiceClient.from_service_account_info.__func__(ReasoningEngineServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + ReasoningEngineServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(ReasoningEngineServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -160,7 +163,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: ReasoningEngineServiceAsyncClient: The constructed client. 
""" - return ReasoningEngineServiceClient.from_service_account_file.__func__(ReasoningEngineServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + ReasoningEngineServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + ReasoningEngineServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_service/client.py b/google/cloud/aiplatform_v1/services/reasoning_engine_service/client.py index 3db110fe60..22ac44992f 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_service/client.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.reasoning_engine_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -73,9 +71,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import ReasoningEngineServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import 
ReasoningEngineServiceGrpcTransport from .transports.grpc_asyncio import ReasoningEngineServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/base.py b/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/base.py index edddab0c7a..aafa4cbbdd 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/base.py @@ -89,8 +89,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -105,11 +103,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest.py b/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest.py index 39197853f6..e28f7a7807 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest.py @@ -1039,6 +1039,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1079,6 +1083,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1405,6 +1413,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1465,6 +1477,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ 
-1803,6 +1819,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1863,6 +1883,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2185,6 +2209,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2257,6 +2285,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2587,6 +2619,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2647,6 +2683,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", 
"uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2755,7 +2795,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2906,7 +2946,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3375,7 +3415,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest_asyncio.py index 72b5c9872e..fd449a46b1 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest_asyncio.py @@ -887,7 +887,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1050,7 +1050,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1539,7 +1539,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: 
request_payload = None http_request = { @@ -1943,6 +1943,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1983,6 +1987,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2309,6 +2317,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2369,6 +2381,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2707,6 +2723,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2767,6 +2787,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3089,6 +3113,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3161,6 +3189,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3491,6 +3523,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3551,6 +3587,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest_base.py index c387e36c00..cbb8f9339d 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest_base.py +++ 
b/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest_base.py @@ -992,6 +992,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1032,6 +1036,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1377,6 +1385,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1437,6 +1449,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1794,6 +1810,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1854,6 +1874,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] 
return http_options @@ -2195,6 +2219,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2267,6 +2295,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2616,6 +2648,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2676,6 +2712,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/schedule_service/async_client.py b/google/cloud/aiplatform_v1/services/schedule_service/async_client.py index dd8ea75c90..51eeb039cd 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.schedule_service import pagers from google.cloud.aiplatform_v1.types import 
notebook_service from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -58,9 +56,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import ScheduleServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import ScheduleServiceGrpcAsyncIOTransport from .client import ScheduleServiceClient @@ -166,7 +166,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: ScheduleServiceAsyncClient: The constructed client. """ - return ScheduleServiceClient.from_service_account_info.__func__(ScheduleServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + ScheduleServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(ScheduleServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -182,7 +185,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: ScheduleServiceAsyncClient: The constructed client. 
""" - return ScheduleServiceClient.from_service_account_file.__func__(ScheduleServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + ScheduleServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(ScheduleServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/schedule_service/client.py b/google/cloud/aiplatform_v1/services/schedule_service/client.py index e47bffea2b..f985b8d614 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/client.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.schedule_service import pagers from google.cloud.aiplatform_v1.types import notebook_service from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -74,9 +72,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import ScheduleServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import ScheduleServiceGrpcTransport from .transports.grpc_asyncio import ScheduleServiceGrpcAsyncIOTransport diff --git 
a/google/cloud/aiplatform_v1/services/schedule_service/transports/base.py b/google/cloud/aiplatform_v1/services/schedule_service/transports/base.py index 35d9fa1302..d56e7d10c0 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/transports/base.py @@ -35,7 +35,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ @@ -91,8 +91,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -107,11 +105,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc.py index 2ccf51ccd7..96768d6925 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc.py @@ -38,7 +38,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import ScheduleServiceTransport, DEFAULT_CLIENT_INFO try: diff --git a/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc_asyncio.py index eb72dc735b..f8cb44558b 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/transports/grpc_asyncio.py @@ -41,7 +41,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import ScheduleServiceTransport, DEFAULT_CLIENT_INFO from .grpc import ScheduleServiceGrpcTransport diff --git a/google/cloud/aiplatform_v1/services/schedule_service/transports/rest.py b/google/cloud/aiplatform_v1/services/schedule_service/transports/rest.py index bf2912f3c8..43e84cc647 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/transports/rest.py @@ -40,7 +40,7 @@ from google.cloud.aiplatform_v1.types import schedule from 
google.cloud.aiplatform_v1.types import schedule as gca_schedule from google.cloud.aiplatform_v1.types import schedule_service -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -1069,6 +1069,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1109,6 +1113,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1435,6 +1443,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1495,6 +1507,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1833,6 +1849,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1893,6 +1913,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2215,6 +2239,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2287,6 +2315,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2617,6 +2649,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2677,6 +2713,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + 
"uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2933,7 +2973,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3378,7 +3418,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3492,7 +3532,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/schedule_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/schedule_service/transports/rest_asyncio.py index 81614a3fdb..7b53e7c598 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/transports/rest_asyncio.py @@ -53,7 +53,7 @@ from google.cloud.aiplatform_v1.types import schedule from google.cloud.aiplatform_v1.types import schedule as gca_schedule from google.cloud.aiplatform_v1.types import schedule_service -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -1085,7 +1085,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1564,7 +1564,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: 
request_payload = None http_request = { @@ -1689,7 +1689,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2226,6 +2226,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2266,6 +2270,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2592,6 +2600,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2652,6 +2664,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2990,6 +3006,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + 
"uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3050,6 +3070,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3372,6 +3396,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3444,6 +3472,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3774,6 +3806,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3834,6 +3870,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git 
a/google/cloud/aiplatform_v1/services/schedule_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/schedule_service/transports/rest_base.py index d4f238c4bf..0f99d359ef 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/transports/rest_base.py @@ -30,7 +30,7 @@ from google.cloud.aiplatform_v1.types import schedule from google.cloud.aiplatform_v1.types import schedule as gca_schedule from google.cloud.aiplatform_v1.types import schedule_service -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -1102,6 +1102,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1142,6 +1146,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1487,6 +1495,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1547,6 +1559,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1904,6 +1920,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1964,6 +1984,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2305,6 +2329,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2377,6 +2405,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2726,6 +2758,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2786,6 +2822,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git 
a/google/cloud/aiplatform_v1/services/session_service/__init__.py b/google/cloud/aiplatform_v1/services/session_service/__init__.py new file mode 100644 index 0000000000..a00026df42 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/session_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import SessionServiceClient +from .async_client import SessionServiceAsyncClient + +__all__ = ( + "SessionServiceClient", + "SessionServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1/services/session_service/async_client.py b/google/cloud/aiplatform_v1/services/session_service/async_client.py new file mode 100644 index 0000000000..4ccf4cd19c --- /dev/null +++ b/google/cloud/aiplatform_v1/services/session_service/async_client.py @@ -0,0 +1,1930 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import logging as std_logging +from collections import OrderedDict +import re +from typing import ( + Dict, + Callable, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore +import google.protobuf + + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.cloud.aiplatform_v1.services.session_service import pagers +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import session +from google.cloud.aiplatform_v1.types import session as gca_session +from google.cloud.aiplatform_v1.types import session_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +from .transports.base import SessionServiceTransport, DEFAULT_CLIENT_INFO +from 
.transports.grpc_asyncio import SessionServiceGrpcAsyncIOTransport +from .client import SessionServiceClient + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class SessionServiceAsyncClient: + """The service that manages Vertex Session related resources.""" + + _client: SessionServiceClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = SessionServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = SessionServiceClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = SessionServiceClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = SessionServiceClient._DEFAULT_UNIVERSE + + reasoning_engine_path = staticmethod(SessionServiceClient.reasoning_engine_path) + parse_reasoning_engine_path = staticmethod( + SessionServiceClient.parse_reasoning_engine_path + ) + session_path = staticmethod(SessionServiceClient.session_path) + parse_session_path = staticmethod(SessionServiceClient.parse_session_path) + session_event_path = staticmethod(SessionServiceClient.session_event_path) + parse_session_event_path = staticmethod( + SessionServiceClient.parse_session_event_path + ) + common_billing_account_path = staticmethod( + SessionServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + SessionServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(SessionServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + SessionServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + SessionServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + SessionServiceClient.parse_common_organization_path + ) + 
common_project_path = staticmethod(SessionServiceClient.common_project_path) + parse_common_project_path = staticmethod( + SessionServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(SessionServiceClient.common_location_path) + parse_common_location_path = staticmethod( + SessionServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SessionServiceAsyncClient: The constructed client. + """ + sa_info_func = ( + SessionServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(SessionServiceAsyncClient, info, *args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SessionServiceAsyncClient: The constructed client. + """ + sa_file_func = ( + SessionServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(SessionServiceAsyncClient, filename, *args, **kwargs) + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. 
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return SessionServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> SessionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + SessionServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance.
+ """ + return self._client._universe_domain + + get_transport_class = SessionServiceClient.get_transport_class + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[ + Union[str, SessionServiceTransport, Callable[..., SessionServiceTransport]] + ] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the session service async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Optional[Union[str,SessionServiceTransport,Callable[..., SessionServiceTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the SessionServiceTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. 
If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = SessionServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.aiplatform_v1.SessionServiceAsyncClient`.", + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "credentialsType": None, + } + ), + ) + + async def create_session( + self, + request: 
Optional[Union[session_service.CreateSessionRequest, dict]] = None, + *, + parent: Optional[str] = None, + session: Optional[gca_session.Session] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new [Session][google.cloud.aiplatform.v1.Session]. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_create_session(): + # Create a client + client = aiplatform_v1.SessionServiceAsyncClient() + + # Initialize request argument(s) + session = aiplatform_v1.Session() + session.user_id = "user_id_value" + + request = aiplatform_v1.CreateSessionRequest( + parent="parent_value", + session=session, + ) + + # Make the request + operation = client.create_session(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.CreateSessionRequest, dict]]): + The request object. Request message for + [SessionService.CreateSession][google.cloud.aiplatform.v1.SessionService.CreateSession]. + parent (:class:`str`): + Required. The resource name of the location to create + the session in. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ session (:class:`google.cloud.aiplatform_v1.types.Session`): + Required. The session to create. + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.Session` A + session contains a set of actions between users and + Vertex agents. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, session] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.CreateSessionRequest): + request = session_service.CreateSessionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if session is not None: + request.session = session + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_session + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_session.Session, + metadata_type=session_service.CreateSessionOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_session( + self, + request: Optional[Union[session_service.GetSessionRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> session.Session: + r"""Gets details of the specific + [Session][google.cloud.aiplatform.v1.Session]. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_get_session(): + # Create a client + client = aiplatform_v1.SessionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetSessionRequest( + name="name_value", + ) + + # Make the request + response = await client.get_session(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.GetSessionRequest, dict]]): + The request object. Request message for + [SessionService.GetSession][google.cloud.aiplatform.v1.SessionService.GetSession]. + name (:class:`str`): + Required. The resource name of the session. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.Session: + A session contains a set of actions + between users and Vertex agents. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.GetSessionRequest): + request = session_service.GetSessionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_session + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_sessions( + self, + request: Optional[Union[session_service.ListSessionsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListSessionsAsyncPager: + r"""Lists [Sessions][google.cloud.aiplatform.v1.Session] in a given + reasoning engine. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_list_sessions(): + # Create a client + client = aiplatform_v1.SessionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListSessionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_sessions(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.ListSessionsRequest, dict]]): + The request object. Request message for + [SessionService.ListSessions][google.cloud.aiplatform.v1.SessionService.ListSessions]. + parent (:class:`str`): + Required. The resource name of the location to list + sessions from. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.services.session_service.pagers.ListSessionsAsyncPager: + Response message for + [SessionService.ListSessions][google.cloud.aiplatform.v1.SessionService.ListSessions]. 
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.ListSessionsRequest): + request = session_service.ListSessionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_sessions + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListSessionsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_session( + self, + request: Optional[Union[session_service.UpdateSessionRequest, dict]] = None, + *, + session: Optional[gca_session.Session] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gca_session.Session: + r"""Updates the specific + [Session][google.cloud.aiplatform.v1.Session]. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_update_session(): + # Create a client + client = aiplatform_v1.SessionServiceAsyncClient() + + # Initialize request argument(s) + session = aiplatform_v1.Session() + session.user_id = "user_id_value" + + request = aiplatform_v1.UpdateSessionRequest( + session=session, + ) + + # Make the request + response = await client.update_session(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.UpdateSessionRequest, dict]]): + The request object. Request message for + [SessionService.UpdateSession][google.cloud.aiplatform.v1.SessionService.UpdateSession]. + session (:class:`google.cloud.aiplatform_v1.types.Session`): + Required. The session to update. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. Field mask is used to + control which fields get updated. If the + mask is not present, all fields will be + updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.Session: + A session contains a set of actions + between users and Vertex agents. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [session, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.UpdateSessionRequest): + request = session_service.UpdateSessionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if session is not None: + request.session = session + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_session + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("session.name", request.session.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_session( + self, + request: Optional[Union[session_service.DeleteSessionRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes details of the specific + [Session][google.cloud.aiplatform.v1.Session]. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_delete_session(): + # Create a client + client = aiplatform_v1.SessionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteSessionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_session(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.DeleteSessionRequest, dict]]): + The request object. Request message for + [SessionService.DeleteSession][google.cloud.aiplatform.v1.SessionService.DeleteSession]. + name (:class:`str`): + Required. The resource name of the session. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.DeleteSessionRequest): + request = session_service.DeleteSessionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_session + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def list_events( + self, + request: Optional[Union[session_service.ListEventsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListEventsAsyncPager: + r"""Lists [Events][google.cloud.aiplatform.v1.Event] in a given + session. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_list_events(): + # Create a client + client = aiplatform_v1.SessionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListEventsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_events(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.ListEventsRequest, dict]]): + The request object. Request message for + [SessionService.ListEvents][google.cloud.aiplatform.v1.SessionService.ListEvents]. + parent (:class:`str`): + Required. The resource name of the session to list + events from. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.services.session_service.pagers.ListEventsAsyncPager: + Response message for + [SessionService.ListEvents][google.cloud.aiplatform.v1.SessionService.ListEvents]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.ListEventsRequest): + request = session_service.ListEventsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_events + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListEventsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def append_event( + self, + request: Optional[Union[session_service.AppendEventRequest, dict]] = None, + *, + name: Optional[str] = None, + event: Optional[session.SessionEvent] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> session_service.AppendEventResponse: + r"""Appends an event to a given session. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_append_event(): + # Create a client + client = aiplatform_v1.SessionServiceAsyncClient() + + # Initialize request argument(s) + event = aiplatform_v1.SessionEvent() + event.author = "author_value" + event.invocation_id = "invocation_id_value" + + request = aiplatform_v1.AppendEventRequest( + name="name_value", + event=event, + ) + + # Make the request + response = await client.append_event(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.AppendEventRequest, dict]]): + The request object. 
Request message for + [SessionService.AppendEvent][google.cloud.aiplatform.v1.SessionService.AppendEvent]. + name (:class:`str`): + Required. The resource name of the session to append + event to. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + event (:class:`google.cloud.aiplatform_v1.types.SessionEvent`): + Required. The event to append to the + session. + + This corresponds to the ``event`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.AppendEventResponse: + Response message for + [SessionService.AppendEvent][google.cloud.aiplatform.v1.SessionService.AppendEvent]. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, event] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, session_service.AppendEventRequest): + request = session_service.AppendEventRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if event is not None: + request.event = event + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.append_event + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.list_operations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. 
+ """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.get_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.delete_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.cancel_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.wait_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. 
+ """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.set_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. 
+ It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.get_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. 
+ """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[ + self._client._transport.test_iam_permissions + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.get_location] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.list_locations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "SessionServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +__all__ = ("SessionServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/session_service/client.py b/google/cloud/aiplatform_v1/services/session_service/client.py new file mode 100644 index 0000000000..53d9ef4766 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/session_service/client.py @@ -0,0 +1,2460 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from http import HTTPStatus +import json +import logging as std_logging +import os +import re +from typing import ( + Dict, + Callable, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore +import google.protobuf + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + +from google.cloud.aiplatform_v1.services.session_service import pagers +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import session 
+from google.cloud.aiplatform_v1.types import session as gca_session +from google.cloud.aiplatform_v1.types import session_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +from .transports.base import SessionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import SessionServiceGrpcTransport +from .transports.grpc_asyncio import SessionServiceGrpcAsyncIOTransport +from .transports.rest import SessionServiceRestTransport + +try: + from .transports.rest_asyncio import AsyncSessionServiceRestTransport + + HAS_ASYNC_REST_DEPENDENCIES = True +except ImportError as e: # pragma: NO COVER + HAS_ASYNC_REST_DEPENDENCIES = False + ASYNC_REST_EXCEPTION = e + + +class SessionServiceClientMeta(type): + """Metaclass for the SessionService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[SessionServiceTransport]] + _transport_registry["grpc"] = SessionServiceGrpcTransport + _transport_registry["grpc_asyncio"] = SessionServiceGrpcAsyncIOTransport + _transport_registry["rest"] = SessionServiceRestTransport + if HAS_ASYNC_REST_DEPENDENCIES: # pragma: NO COVER + _transport_registry["rest_asyncio"] = AsyncSessionServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[SessionServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if ( + label == "rest_asyncio" and not HAS_ASYNC_REST_DEPENDENCIES + ): # pragma: NO COVER + raise ASYNC_REST_EXCEPTION + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class SessionServiceClient(metaclass=SessionServiceClientMeta): + """The service that manages Vertex Session related resources.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. 
+ + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SessionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SessionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> SessionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + SessionServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def reasoning_engine_path( + project: str, + location: str, + reasoning_engine: str, + ) -> str: + """Returns a fully-qualified reasoning_engine string.""" + return "projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}".format( + project=project, + location=location, + reasoning_engine=reasoning_engine, + ) + + @staticmethod + def parse_reasoning_engine_path(path: str) -> Dict[str, str]: + """Parses a reasoning_engine path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/reasoningEngines/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def session_path( + project: str, + location: str, + reasoning_engine: str, + session: str, + ) -> str: + """Returns a fully-qualified session string.""" + return "projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}".format( + project=project, + location=location, + reasoning_engine=reasoning_engine, + session=session, + ) + + @staticmethod + def parse_session_path(path: str) -> Dict[str, str]: + """Parses a session path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/reasoningEngines/(?P.+?)/sessions/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def session_event_path( + project: str, + location: str, + reasoning_engine: str, + session: str, + event: str, + ) -> str: + """Returns a fully-qualified session_event string.""" + return "projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}/events/{event}".format( + project=project, + location=location, + reasoning_engine=reasoning_engine, + session=session, + event=event, + ) + + @staticmethod + def parse_session_event_path(path: str) -> Dict[str, str]: + """Parses a session_event path into its component segments.""" + m = re.match( + 
r"^projects/(?P.+?)/locations/(?P.+?)/reasoningEngines/(?P.+?)/sessions/(?P.+?)/events/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path( + billing_account: str, + ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path( + folder: str, + ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format( + folder=folder, + ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path( + organization: str, + ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format( + organization=organization, + ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path( + project: str, + ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format( + project=project, + ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path( + project: str, + location: str, + ) -> str: + """Returns a fully-qualified 
location string.""" + return "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Deprecated. Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. 
Use the api_endpoint property instead.", + DeprecationWarning, + ) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = SessionServiceClient._use_client_cert_effective() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert: + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. 
+ """ + use_client_cert = SessionServiceClient._use_client_cert_effective() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert, use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. 
+ """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = SessionServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = SessionServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = SessionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = SessionServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + + # NOTE (b/349488459): universe validation is disabled until further notice. 
+ return True + + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[ + Union[str, SessionServiceTransport, Callable[..., SessionServiceTransport]] + ] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the session service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Optional[Union[str,SessionServiceTransport,Callable[..., SessionServiceTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the SessionServiceTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + SessionServiceClient._read_environment_variables() + ) + self._client_cert_source = SessionServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = SessionServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, SessionServiceTransport) + if transport_provided: + # transport is a SessionServiceTransport instance. + if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." 
+ ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = cast(SessionServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or SessionServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + transport_init: Union[ + Type[SessionServiceTransport], Callable[..., SessionServiceTransport] + ] = ( + SessionServiceClient.get_transport_class(transport) + if isinstance(transport, str) or transport is None + else cast(Callable[..., SessionServiceTransport], transport) + ) + + if "rest_asyncio" in str(transport_init): + unsupported_params = { + "google.api_core.client_options.ClientOptions.credentials_file": self._client_options.credentials_file, + "google.api_core.client_options.ClientOptions.scopes": self._client_options.scopes, + "google.api_core.client_options.ClientOptions.quota_project_id": self._client_options.quota_project_id, + "google.api_core.client_options.ClientOptions.client_cert_source": self._client_options.client_cert_source, + "google.api_core.client_options.ClientOptions.api_audience": self._client_options.api_audience, + } + provided_unsupported_params = [ + name + for name, value in unsupported_params.items() + if value is not None + ] + if provided_unsupported_params: + raise core_exceptions.AsyncRestUnsupportedParameterError( # type: ignore + f"The following provided parameters are not supported for `transport=rest_asyncio`: {', '.join(provided_unsupported_params)}" + ) + self._transport = transport_init( + credentials=credentials, + host=self._api_endpoint, + client_info=client_info, + ) + return + + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = 
google.auth._default.get_api_key_credentials( + api_key_value + ) + + # initialize with the provided callable or the passed in class + self._transport = transport_init( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.aiplatform_v1.SessionServiceClient`.", + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "credentialsType": None, + } + ), + ) + + def create_session( + self, + request: Optional[Union[session_service.CreateSessionRequest, dict]] = None, + *, + parent: Optional[str] = None, + session: Optional[gca_session.Session] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gac_operation.Operation: + r"""Creates a new [Session][google.cloud.aiplatform.v1.Session]. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_create_session(): + # Create a client + client = aiplatform_v1.SessionServiceClient() + + # Initialize request argument(s) + session = aiplatform_v1.Session() + session.user_id = "user_id_value" + + request = aiplatform_v1.CreateSessionRequest( + parent="parent_value", + session=session, + ) + + # Make the request + operation = client.create_session(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateSessionRequest, dict]): + The request object. Request message for + [SessionService.CreateSession][google.cloud.aiplatform.v1.SessionService.CreateSession]. + parent (str): + Required. The resource name of the location to create + the session in. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + session (google.cloud.aiplatform_v1.types.Session): + Required. The session to create. + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.Session` A + session contains a set of actions between users and + Vertex agents. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, session] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.CreateSessionRequest): + request = session_service.CreateSessionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if session is not None: + request.session = session + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_session] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_session.Session, + metadata_type=session_service.CreateSessionOperationMetadata, + ) + + # Done; return the response. + return response + + def get_session( + self, + request: Optional[Union[session_service.GetSessionRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> session.Session: + r"""Gets details of the specific + [Session][google.cloud.aiplatform.v1.Session]. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_get_session(): + # Create a client + client = aiplatform_v1.SessionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetSessionRequest( + name="name_value", + ) + + # Make the request + response = client.get_session(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetSessionRequest, dict]): + The request object. Request message for + [SessionService.GetSession][google.cloud.aiplatform.v1.SessionService.GetSession]. + name (str): + Required. The resource name of the session. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.Session: + A session contains a set of actions + between users and Vertex agents. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.GetSessionRequest): + request = session_service.GetSessionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_session] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_sessions( + self, + request: Optional[Union[session_service.ListSessionsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListSessionsPager: + r"""Lists [Sessions][google.cloud.aiplatform.v1.Session] in a given + reasoning engine. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_list_sessions(): + # Create a client + client = aiplatform_v1.SessionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListSessionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_sessions(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListSessionsRequest, dict]): + The request object. Request message for + [SessionService.ListSessions][google.cloud.aiplatform.v1.SessionService.ListSessions]. + parent (str): + Required. The resource name of the location to list + sessions from. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.services.session_service.pagers.ListSessionsPager: + Response message for + [SessionService.ListSessions][google.cloud.aiplatform.v1.SessionService.ListSessions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.ListSessionsRequest): + request = session_service.ListSessionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_sessions] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListSessionsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_session( + self, + request: Optional[Union[session_service.UpdateSessionRequest, dict]] = None, + *, + session: Optional[gca_session.Session] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gca_session.Session: + r"""Updates the specific + [Session][google.cloud.aiplatform.v1.Session]. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_update_session(): + # Create a client + client = aiplatform_v1.SessionServiceClient() + + # Initialize request argument(s) + session = aiplatform_v1.Session() + session.user_id = "user_id_value" + + request = aiplatform_v1.UpdateSessionRequest( + session=session, + ) + + # Make the request + response = client.update_session(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateSessionRequest, dict]): + The request object. Request message for + [SessionService.UpdateSession][google.cloud.aiplatform.v1.SessionService.UpdateSession]. + session (google.cloud.aiplatform_v1.types.Session): + Required. The session to update. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Field mask is used to + control which fields get updated. If the + mask is not present, all fields will be + updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + google.cloud.aiplatform_v1.types.Session: + A session contains a set of actions + between users and Vertex agents. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [session, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.UpdateSessionRequest): + request = session_service.UpdateSessionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if session is not None: + request.session = session + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_session] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("session.name", request.session.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_session( + self, + request: Optional[Union[session_service.DeleteSessionRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gac_operation.Operation: + r"""Deletes details of the specific + [Session][google.cloud.aiplatform.v1.Session]. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_delete_session(): + # Create a client + client = aiplatform_v1.SessionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteSessionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_session(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteSessionRequest, dict]): + The request object. Request message for + [SessionService.DeleteSession][google.cloud.aiplatform.v1.SessionService.DeleteSession]. + name (str): + Required. The resource name of the session. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.DeleteSessionRequest): + request = session_service.DeleteSessionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_session] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def list_events( + self, + request: Optional[Union[session_service.ListEventsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListEventsPager: + r"""Lists [Events][google.cloud.aiplatform.v1.Event] in a given + session. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_list_events(): + # Create a client + client = aiplatform_v1.SessionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListEventsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_events(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListEventsRequest, dict]): + The request object. 
Request message for + [SessionService.ListEvents][google.cloud.aiplatform.v1.SessionService.ListEvents]. + parent (str): + Required. The resource name of the session to list + events from. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.services.session_service.pagers.ListEventsPager: + Response message for + [SessionService.ListEvents][google.cloud.aiplatform.v1.SessionService.ListEvents]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.ListEventsRequest): + request = session_service.ListEventsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_events] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListEventsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def append_event( + self, + request: Optional[Union[session_service.AppendEventRequest, dict]] = None, + *, + name: Optional[str] = None, + event: Optional[session.SessionEvent] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> session_service.AppendEventResponse: + r"""Appends an event to a given session. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_append_event(): + # Create a client + client = aiplatform_v1.SessionServiceClient() + + # Initialize request argument(s) + event = aiplatform_v1.SessionEvent() + event.author = "author_value" + event.invocation_id = "invocation_id_value" + + request = aiplatform_v1.AppendEventRequest( + name="name_value", + event=event, + ) + + # Make the request + response = client.append_event(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.AppendEventRequest, dict]): + The request object. Request message for + [SessionService.AppendEvent][google.cloud.aiplatform.v1.SessionService.AppendEvent]. + name (str): + Required. The resource name of the session to append + event to. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + event (google.cloud.aiplatform_v1.types.SessionEvent): + Required. The event to append to the + session. + + This corresponds to the ``event`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + google.cloud.aiplatform_v1.types.AppendEventResponse: + Response message for + [SessionService.AppendEvent][google.cloud.aiplatform.v1.SessionService.AppendEvent]. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, event] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, session_service.AppendEventRequest): + request = session_service.AppendEventRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if event is not None: + request.event = event + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.append_event] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "SessionServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! 
Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_operations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_operation] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_operation] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.wait_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). 
+ A ``role`` is a named list of permissions (defined by
+ IAM or configured by users). A ``binding`` can
+ optionally specify a ``condition``, which is a logic
+ expression that further constrains the role binding
+ based on attributes about the request and/or target
+ resource.
+
+ **JSON Example**
+
+ ::
+
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": ["user:eve@example.com"],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ]
+ }
+
+ **YAML Example**
+
+ ::
+
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+ For a description of IAM and its features, see the `IAM
+ developer's
+ guide <https://cloud.google.com/iam/docs>`__.
+ """
+ # Create or coerce a protobuf request object.
+
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.SetIamPolicyRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.set_iam_policy]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can
+ optionally specify a ``condition``, which is a logic
+ expression that further constrains the role binding
+ based on attributes about the request and/or target
+ resource.
+
+ **JSON Example**
+
+ ::
+
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": ["user:eve@example.com"],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ]
+ }
+
+ **YAML Example**
+
+ ::
+
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+ For a description of IAM and its features, see the `IAM
+ developer's
+ guide <https://cloud.google.com/iam/docs>`__.
+ """
+ # Create or coerce a protobuf request object.
+
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.GetIamPolicyRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_iam_policy]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.get_location] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_locations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + +__all__ = ("SessionServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/session_service/pagers.py b/google/cloud/aiplatform_v1/services/session_service/pagers.py new file mode 100644 index 0000000000..fd893d6b67 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/session_service/pagers.py @@ -0,0 +1,353 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import retry_async as retries_async +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, + Union, +) + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] + OptionalAsyncRetry = Union[ + retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None + ] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore + +from google.cloud.aiplatform_v1.types import session +from google.cloud.aiplatform_v1.types import session_service + + +class ListSessionsPager: + """A pager for iterating through ``list_sessions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListSessionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``sessions`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListSessions`` requests and continue to iterate + through the ``sessions`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListSessionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., session_service.ListSessionsResponse], + request: session_service.ListSessionsRequest, + response: session_service.ListSessionsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListSessionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListSessionsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = session_service.ListSessionsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[session_service.ListSessionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[session.Session]: + for page in self.pages: + yield from page.sessions + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListSessionsAsyncPager: + """A pager for iterating through ``list_sessions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListSessionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``sessions`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListSessions`` requests and continue to iterate + through the ``sessions`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListSessionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[session_service.ListSessionsResponse]], + request: session_service.ListSessionsRequest, + response: session_service.ListSessionsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListSessionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListSessionsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = session_service.ListSessionsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[session_service.ListSessionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[session.Session]: + async def async_generator(): + async for page in self.pages: + for response in page.sessions: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListEventsPager: + """A pager for iterating through ``list_events`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListEventsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``session_events`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListEvents`` requests and continue to iterate + through the ``session_events`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListEventsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., session_service.ListEventsResponse], + request: session_service.ListEventsRequest, + response: session_service.ListEventsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListEventsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListEventsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = session_service.ListEventsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[session_service.ListEventsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[session.SessionEvent]: + for page in self.pages: + yield from page.session_events + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListEventsAsyncPager: + """A pager for iterating through ``list_events`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListEventsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``session_events`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListEvents`` requests and continue to iterate + through the ``session_events`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListEventsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[session_service.ListEventsResponse]], + request: session_service.ListEventsRequest, + response: session_service.ListEventsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListEventsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListEventsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = session_service.ListEventsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[session_service.ListEventsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[session.SessionEvent]: + async def async_generator(): + async for page in self.pages: + for response in page.session_events: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1/services/session_service/transports/README.rst b/google/cloud/aiplatform_v1/services/session_service/transports/README.rst new file mode 100644 index 0000000000..f67320e75a --- /dev/null +++ b/google/cloud/aiplatform_v1/services/session_service/transports/README.rst 
@@ -0,0 +1,9 @@ + +transport inheritance structure +_______________________________ + +`SessionServiceTransport` is the ABC for all transports. +- public child `SessionServiceGrpcTransport` for sync gRPC transport (defined in `grpc.py`). +- public child `SessionServiceGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`). +- private child `_BaseSessionServiceRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`). +- public child `SessionServiceRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`). diff --git a/google/cloud/aiplatform_v1/services/session_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/session_service/transports/__init__.py new file mode 100644 index 0000000000..d4890abd08 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/session_service/transports/__init__.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type, Tuple + +from .base import SessionServiceTransport +from .grpc import SessionServiceGrpcTransport +from .grpc_asyncio import SessionServiceGrpcAsyncIOTransport +from .rest import SessionServiceRestTransport +from .rest import SessionServiceRestInterceptor + +ASYNC_REST_CLASSES: Tuple[str, ...] 
+try: + from .rest_asyncio import AsyncSessionServiceRestTransport + from .rest_asyncio import AsyncSessionServiceRestInterceptor + + ASYNC_REST_CLASSES = ( + "AsyncSessionServiceRestTransport", + "AsyncSessionServiceRestInterceptor", + ) + HAS_REST_ASYNC = True +except ImportError: # pragma: NO COVER + ASYNC_REST_CLASSES = () + HAS_REST_ASYNC = False + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[SessionServiceTransport]] +_transport_registry["grpc"] = SessionServiceGrpcTransport +_transport_registry["grpc_asyncio"] = SessionServiceGrpcAsyncIOTransport +_transport_registry["rest"] = SessionServiceRestTransport +if HAS_REST_ASYNC: # pragma: NO COVER + _transport_registry["rest_asyncio"] = AsyncSessionServiceRestTransport + +__all__ = ( + "SessionServiceTransport", + "SessionServiceGrpcTransport", + "SessionServiceGrpcAsyncIOTransport", + "SessionServiceRestTransport", + "SessionServiceRestInterceptor", +) + ASYNC_REST_CLASSES diff --git a/google/cloud/aiplatform_v1/services/session_service/transports/base.py b/google/cloud/aiplatform_v1/services/session_service/transports/base.py new file mode 100644 index 0000000000..2f7faad965 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/session_service/transports/base.py @@ -0,0 +1,424 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore +import google.protobuf + +from google.cloud.aiplatform_v1.types import session +from google.cloud.aiplatform_v1.types import session as gca_session +from google.cloud.aiplatform_v1.types import session_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +class SessionServiceTransport(abc.ABC): + """Abstract transport class for SessionService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the scopes. + self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, + ) + elif credentials is None and not self._ignore_credentials: + credentials, _ = google.auth.default( + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.create_session: gapic_v1.method.wrap_method( + self.create_session, + default_timeout=None, + client_info=client_info, + ), + self.get_session: gapic_v1.method.wrap_method( + self.get_session, + default_timeout=None, + client_info=client_info, + ), + self.list_sessions: gapic_v1.method.wrap_method( + self.list_sessions, + default_timeout=None, + client_info=client_info, + ), + self.update_session: gapic_v1.method.wrap_method( + self.update_session, + default_timeout=None, + client_info=client_info, + ), + self.delete_session: gapic_v1.method.wrap_method( + self.delete_session, + default_timeout=None, + client_info=client_info, + ), + self.list_events: gapic_v1.method.wrap_method( + self.list_events, + default_timeout=None, + client_info=client_info, + ), + self.append_event: gapic_v1.method.wrap_method( + self.append_event, + default_timeout=None, + client_info=client_info, + ), + self.get_location: gapic_v1.method.wrap_method( + self.get_location, + default_timeout=None, + client_info=client_info, + ), + self.list_locations: gapic_v1.method.wrap_method( + self.list_locations, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + self.cancel_operation: gapic_v1.method.wrap_method( + self.cancel_operation, + default_timeout=None, + client_info=client_info, + ), + self.delete_operation: gapic_v1.method.wrap_method( + self.delete_operation, + default_timeout=None, + client_info=client_info, + ), + self.get_operation: gapic_v1.method.wrap_method( + self.get_operation, + default_timeout=None, + client_info=client_info, + ), + 
self.list_operations: gapic_v1.method.wrap_method( + self.list_operations, + default_timeout=None, + client_info=client_info, + ), + self.wait_operation: gapic_v1.method.wrap_method( + self.wait_operation, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_session( + self, + ) -> Callable[ + [session_service.CreateSessionRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_session( + self, + ) -> Callable[ + [session_service.GetSessionRequest], + Union[session.Session, Awaitable[session.Session]], + ]: + raise NotImplementedError() + + @property + def list_sessions( + self, + ) -> Callable[ + [session_service.ListSessionsRequest], + Union[ + session_service.ListSessionsResponse, + Awaitable[session_service.ListSessionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_session( + self, + ) -> Callable[ + [session_service.UpdateSessionRequest], + Union[gca_session.Session, Awaitable[gca_session.Session]], + ]: + raise NotImplementedError() + + @property + def delete_session( + self, + ) -> Callable[ + [session_service.DeleteSessionRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_events( + self, + ) -> Callable[ + [session_service.ListEventsRequest], + Union[ + session_service.ListEventsResponse, + Awaitable[session_service.ListEventsResponse], + ], + ]: + raise NotImplementedError() + + @property + def append_event( + self, + ) -> Callable[ + 
[session_service.AppendEventRequest], + Union[ + session_service.AppendEventResponse, + Awaitable[session_service.AppendEventResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + 
@property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("SessionServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/session_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/session_service/transports/grpc.py new file mode 100644 index 0000000000..01455ac2aa --- /dev/null +++ b/google/cloud/aiplatform_v1/services/session_service/transports/grpc.py @@ -0,0 +1,753 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import json +import logging as std_logging +import pickle +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message + +import grpc # type: ignore +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import session +from google.cloud.aiplatform_v1.types import session as gca_session +from google.cloud.aiplatform_v1.types import session_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import SessionServiceTransport, DEFAULT_CLIENT_INFO + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: 
 value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + + +class SessionServiceGrpcTransport(SessionServiceTransport): + """gRPC backend transport for SessionService. + + The service that manages Vertex Session related resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if a ``channel`` instance is provided. + channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. 
If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if a ``channel`` instance is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if isinstance(channel, grpc.Channel): + # Ignore credentials if a channel was passed. + credentials = None + self._ignore_credentials = True + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be 
used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. 
+ + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_session( + self, + ) -> Callable[[session_service.CreateSessionRequest], operations_pb2.Operation]: + r"""Return a callable for the create session method over gRPC. + + Creates a new [Session][google.cloud.aiplatform.v1.Session]. + + Returns: + Callable[[~.CreateSessionRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_session" not in self._stubs: + self._stubs["create_session"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/CreateSession", + request_serializer=session_service.CreateSessionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_session"] + + @property + def get_session( + self, + ) -> Callable[[session_service.GetSessionRequest], session.Session]: + r"""Return a callable for the get session method over gRPC. + + Gets details of the specific + [Session][google.cloud.aiplatform.v1.Session]. + + Returns: + Callable[[~.GetSessionRequest], + ~.Session]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_session" not in self._stubs: + self._stubs["get_session"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/GetSession", + request_serializer=session_service.GetSessionRequest.serialize, + response_deserializer=session.Session.deserialize, + ) + return self._stubs["get_session"] + + @property + def list_sessions( + self, + ) -> Callable[ + [session_service.ListSessionsRequest], session_service.ListSessionsResponse + ]: + r"""Return a callable for the list sessions method over gRPC. + + Lists [Sessions][google.cloud.aiplatform.v1.Session] in a given + reasoning engine. + + Returns: + Callable[[~.ListSessionsRequest], + ~.ListSessionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_sessions" not in self._stubs: + self._stubs["list_sessions"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/ListSessions", + request_serializer=session_service.ListSessionsRequest.serialize, + response_deserializer=session_service.ListSessionsResponse.deserialize, + ) + return self._stubs["list_sessions"] + + @property + def update_session( + self, + ) -> Callable[[session_service.UpdateSessionRequest], gca_session.Session]: + r"""Return a callable for the update session method over gRPC. + + Updates the specific + [Session][google.cloud.aiplatform.v1.Session]. + + Returns: + Callable[[~.UpdateSessionRequest], + ~.Session]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_session" not in self._stubs: + self._stubs["update_session"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/UpdateSession", + request_serializer=session_service.UpdateSessionRequest.serialize, + response_deserializer=gca_session.Session.deserialize, + ) + return self._stubs["update_session"] + + @property + def delete_session( + self, + ) -> Callable[[session_service.DeleteSessionRequest], operations_pb2.Operation]: + r"""Return a callable for the delete session method over gRPC. + + Deletes details of the specific + [Session][google.cloud.aiplatform.v1.Session]. + + Returns: + Callable[[~.DeleteSessionRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_session" not in self._stubs: + self._stubs["delete_session"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/DeleteSession", + request_serializer=session_service.DeleteSessionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_session"] + + @property + def list_events( + self, + ) -> Callable[ + [session_service.ListEventsRequest], session_service.ListEventsResponse + ]: + r"""Return a callable for the list events method over gRPC. + + Lists [Events][google.cloud.aiplatform.v1.Event] in a given + session. + + Returns: + Callable[[~.ListEventsRequest], + ~.ListEventsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_events" not in self._stubs: + self._stubs["list_events"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/ListEvents", + request_serializer=session_service.ListEventsRequest.serialize, + response_deserializer=session_service.ListEventsResponse.deserialize, + ) + return self._stubs["list_events"] + + @property + def append_event( + self, + ) -> Callable[ + [session_service.AppendEventRequest], session_service.AppendEventResponse + ]: + r"""Return a callable for the append event method over gRPC. + + Appends an event to a given session. + + Returns: + Callable[[~.AppendEventRequest], + ~.AppendEventResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "append_event" not in self._stubs: + self._stubs["append_event"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/AppendEvent", + request_serializer=session_service.AppendEventRequest.serialize, + response_deserializer=session_service.AppendEventResponse.deserialize, + ) + return self._stubs["append_event"] + + def close(self): + self._logged_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "wait_operation" not in self._stubs: + self._stubs["wait_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("SessionServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/session_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/session_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..1cf6c0dbf8 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/session_service/transports/grpc_asyncio.py @@ -0,0 +1,865 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import inspect +import json +import pickle +import logging as std_logging +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import exceptions as core_exceptions +from google.api_core import retry_async as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message + +import grpc # type: ignore +import proto # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import session +from google.cloud.aiplatform_v1.types import session as gca_session +from google.cloud.aiplatform_v1.types import session_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import SessionServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import SessionServiceGrpcTransport + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + 
elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + + +class SessionServiceGrpcAsyncIOTransport(SessionServiceTransport): + """gRPC AsyncIO backend transport for SessionService. + + The service that manages Vertex Session related resources. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. 
+ channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if a ``channel`` instance is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if isinstance(channel, aio.Channel): + # Ignore credentials if a channel was passed. + credentials = None + self._ignore_credentials = True + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( + self._host, + # use the credentials which are saved + 
credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel + self._wrap_with_kind = ( + "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters + ) + # Wrap messages. This must be done after self._logged_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self._logged_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_session( + self, + ) -> Callable[ + [session_service.CreateSessionRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the create session method over gRPC. + + Creates a new [Session][google.cloud.aiplatform.v1.Session]. + + Returns: + Callable[[~.CreateSessionRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_session" not in self._stubs: + self._stubs["create_session"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/CreateSession", + request_serializer=session_service.CreateSessionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_session"] + + @property + def get_session( + self, + ) -> Callable[[session_service.GetSessionRequest], Awaitable[session.Session]]: + r"""Return a callable for the get session method over gRPC. + + Gets details of the specific + [Session][google.cloud.aiplatform.v1.Session]. + + Returns: + Callable[[~.GetSessionRequest], + Awaitable[~.Session]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_session" not in self._stubs: + self._stubs["get_session"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/GetSession", + request_serializer=session_service.GetSessionRequest.serialize, + response_deserializer=session.Session.deserialize, + ) + return self._stubs["get_session"] + + @property + def list_sessions( + self, + ) -> Callable[ + [session_service.ListSessionsRequest], + Awaitable[session_service.ListSessionsResponse], + ]: + r"""Return a callable for the list sessions method over gRPC. + + Lists [Sessions][google.cloud.aiplatform.v1.Session] in a given + reasoning engine. + + Returns: + Callable[[~.ListSessionsRequest], + Awaitable[~.ListSessionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_sessions" not in self._stubs: + self._stubs["list_sessions"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/ListSessions", + request_serializer=session_service.ListSessionsRequest.serialize, + response_deserializer=session_service.ListSessionsResponse.deserialize, + ) + return self._stubs["list_sessions"] + + @property + def update_session( + self, + ) -> Callable[ + [session_service.UpdateSessionRequest], Awaitable[gca_session.Session] + ]: + r"""Return a callable for the update session method over gRPC. + + Updates the specific + [Session][google.cloud.aiplatform.v1.Session]. + + Returns: + Callable[[~.UpdateSessionRequest], + Awaitable[~.Session]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_session" not in self._stubs: + self._stubs["update_session"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/UpdateSession", + request_serializer=session_service.UpdateSessionRequest.serialize, + response_deserializer=gca_session.Session.deserialize, + ) + return self._stubs["update_session"] + + @property + def delete_session( + self, + ) -> Callable[ + [session_service.DeleteSessionRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the delete session method over gRPC. + + Deletes details of the specific + [Session][google.cloud.aiplatform.v1.Session]. + + Returns: + Callable[[~.DeleteSessionRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_session" not in self._stubs: + self._stubs["delete_session"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/DeleteSession", + request_serializer=session_service.DeleteSessionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_session"] + + @property + def list_events( + self, + ) -> Callable[ + [session_service.ListEventsRequest], + Awaitable[session_service.ListEventsResponse], + ]: + r"""Return a callable for the list events method over gRPC. + + Lists [Events][google.cloud.aiplatform.v1.Event] in a given + session. + + Returns: + Callable[[~.ListEventsRequest], + Awaitable[~.ListEventsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_events" not in self._stubs: + self._stubs["list_events"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/ListEvents", + request_serializer=session_service.ListEventsRequest.serialize, + response_deserializer=session_service.ListEventsResponse.deserialize, + ) + return self._stubs["list_events"] + + @property + def append_event( + self, + ) -> Callable[ + [session_service.AppendEventRequest], + Awaitable[session_service.AppendEventResponse], + ]: + r"""Return a callable for the append event method over gRPC. + + Appends an event to a given session. + + Returns: + Callable[[~.AppendEventRequest], + Awaitable[~.AppendEventResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "append_event" not in self._stubs: + self._stubs["append_event"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.SessionService/AppendEvent", + request_serializer=session_service.AppendEventRequest.serialize, + response_deserializer=session_service.AppendEventResponse.deserialize, + ) + return self._stubs["append_event"] + + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.create_session: self._wrap_method( + self.create_session, + default_timeout=None, + client_info=client_info, + ), + self.get_session: self._wrap_method( + self.get_session, + default_timeout=None, + client_info=client_info, + ), + self.list_sessions: self._wrap_method( + self.list_sessions, + default_timeout=None, + client_info=client_info, + ), + self.update_session: self._wrap_method( + self.update_session, + default_timeout=None, + client_info=client_info, + ), + self.delete_session: self._wrap_method( + self.delete_session, + default_timeout=None, + client_info=client_info, + ), + self.list_events: self._wrap_method( + self.list_events, + default_timeout=None, + client_info=client_info, + ), + self.append_event: self._wrap_method( + self.append_event, + default_timeout=None, + client_info=client_info, + ), + self.get_location: self._wrap_method( + self.get_location, + default_timeout=None, + client_info=client_info, + ), + self.list_locations: self._wrap_method( + self.list_locations, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: self._wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: self._wrap_method( + self.set_iam_policy, + default_timeout=None, + 
client_info=client_info, + ), + self.test_iam_permissions: self._wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + self.cancel_operation: self._wrap_method( + self.cancel_operation, + default_timeout=None, + client_info=client_info, + ), + self.delete_operation: self._wrap_method( + self.delete_operation, + default_timeout=None, + client_info=client_info, + ), + self.get_operation: self._wrap_method( + self.get_operation, + default_timeout=None, + client_info=client_info, + ), + self.list_operations: self._wrap_method( + self.list_operations, + default_timeout=None, + client_info=client_info, + ), + self.wait_operation: self._wrap_method( + self.wait_operation, + default_timeout=None, + client_info=client_info, + ), + } + + def _wrap_method(self, func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + + def close(self): + return self._logged_channel.close() + + @property + def kind(self) -> str: + return "grpc_asyncio" + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "wait_operation" not in self._stubs: + self._stubs["wait_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("SessionServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/session_service/transports/rest.py b/google/cloud/aiplatform_v1/services/session_service/transports/rest.py new file mode 100644 index 0000000000..7175edf300 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/session_service/transports/rest.py @@ -0,0 +1,5312 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import logging +import json # type: ignore + +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import gapic_v1 +import google.protobuf + +from google.protobuf import json_format +from google.api_core import operations_v1 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore + +from requests import __version__ as requests_version +import dataclasses +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + + +from google.cloud.aiplatform_v1.types import session +from google.cloud.aiplatform_v1.types import session as gca_session +from google.cloud.aiplatform_v1.types import session_service +from google.longrunning import operations_pb2 # type: ignore + + +from .rest_base import _BaseSessionServiceRestTransport +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=f"requests@{requests_version}", +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + 
DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +class SessionServiceRestInterceptor: + """Interceptor for SessionService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the SessionServiceRestTransport. + + .. code-block:: python + class MyCustomSessionServiceInterceptor(SessionServiceRestInterceptor): + def pre_append_event(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_append_event(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_session(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_session(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_session(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_session(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_session(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_session(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_events(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_events(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_sessions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_sessions(self, response): + 
logging.log(f"Received response: {response}") + return response + + def pre_update_session(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_session(self, response): + logging.log(f"Received response: {response}") + return response + + transport = SessionServiceRestTransport(interceptor=MyCustomSessionServiceInterceptor()) + client = SessionServiceClient(transport=transport) + + + """ + + def pre_append_event( + self, + request: session_service.AppendEventRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.AppendEventRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for append_event + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_append_event( + self, response: session_service.AppendEventResponse + ) -> session_service.AppendEventResponse: + """Post-rpc interceptor for append_event + + DEPRECATED. Please use the `post_append_event_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_append_event` interceptor runs + before the `post_append_event_with_metadata` interceptor. + """ + return response + + def post_append_event_with_metadata( + self, + response: session_service.AppendEventResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.AppendEventResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for append_event + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. 
+ + We recommend only using this `post_append_event_with_metadata` + interceptor in new development instead of the `post_append_event` interceptor. + When both interceptors are used, this `post_append_event_with_metadata` interceptor runs after the + `post_append_event` interceptor. The (possibly modified) response returned by + `post_append_event` will be passed to + `post_append_event_with_metadata`. + """ + return response, metadata + + def pre_create_session( + self, + request: session_service.CreateSessionRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.CreateSessionRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for create_session + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_create_session( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_session + + DEPRECATED. Please use the `post_create_session_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_create_session` interceptor runs + before the `post_create_session_with_metadata` interceptor. + """ + return response + + def post_create_session_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_session + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_create_session_with_metadata` + interceptor in new development instead of the `post_create_session` interceptor. 
+ When both interceptors are used, this `post_create_session_with_metadata` interceptor runs after the + `post_create_session` interceptor. The (possibly modified) response returned by + `post_create_session` will be passed to + `post_create_session_with_metadata`. + """ + return response, metadata + + def pre_delete_session( + self, + request: session_service.DeleteSessionRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.DeleteSessionRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for delete_session + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_delete_session( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_session + + DEPRECATED. Please use the `post_delete_session_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_delete_session` interceptor runs + before the `post_delete_session_with_metadata` interceptor. + """ + return response + + def post_delete_session_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_session + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_delete_session_with_metadata` + interceptor in new development instead of the `post_delete_session` interceptor. + When both interceptors are used, this `post_delete_session_with_metadata` interceptor runs after the + `post_delete_session` interceptor. 
The (possibly modified) response returned by + `post_delete_session` will be passed to + `post_delete_session_with_metadata`. + """ + return response, metadata + + def pre_get_session( + self, + request: session_service.GetSessionRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.GetSessionRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_session + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_get_session(self, response: session.Session) -> session.Session: + """Post-rpc interceptor for get_session + + DEPRECATED. Please use the `post_get_session_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_get_session` interceptor runs + before the `post_get_session_with_metadata` interceptor. + """ + return response + + def post_get_session_with_metadata( + self, + response: session.Session, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[session.Session, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_session + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_get_session_with_metadata` + interceptor in new development instead of the `post_get_session` interceptor. + When both interceptors are used, this `post_get_session_with_metadata` interceptor runs after the + `post_get_session` interceptor. The (possibly modified) response returned by + `post_get_session` will be passed to + `post_get_session_with_metadata`. 
+ """ + return response, metadata + + def pre_list_events( + self, + request: session_service.ListEventsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.ListEventsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_events + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_list_events( + self, response: session_service.ListEventsResponse + ) -> session_service.ListEventsResponse: + """Post-rpc interceptor for list_events + + DEPRECATED. Please use the `post_list_events_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_list_events` interceptor runs + before the `post_list_events_with_metadata` interceptor. + """ + return response + + def post_list_events_with_metadata( + self, + response: session_service.ListEventsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.ListEventsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_events + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_list_events_with_metadata` + interceptor in new development instead of the `post_list_events` interceptor. + When both interceptors are used, this `post_list_events_with_metadata` interceptor runs after the + `post_list_events` interceptor. The (possibly modified) response returned by + `post_list_events` will be passed to + `post_list_events_with_metadata`. 
+ """ + return response, metadata + + def pre_list_sessions( + self, + request: session_service.ListSessionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.ListSessionsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_sessions + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_list_sessions( + self, response: session_service.ListSessionsResponse + ) -> session_service.ListSessionsResponse: + """Post-rpc interceptor for list_sessions + + DEPRECATED. Please use the `post_list_sessions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_list_sessions` interceptor runs + before the `post_list_sessions_with_metadata` interceptor. + """ + return response + + def post_list_sessions_with_metadata( + self, + response: session_service.ListSessionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.ListSessionsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_sessions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_list_sessions_with_metadata` + interceptor in new development instead of the `post_list_sessions` interceptor. + When both interceptors are used, this `post_list_sessions_with_metadata` interceptor runs after the + `post_list_sessions` interceptor. The (possibly modified) response returned by + `post_list_sessions` will be passed to + `post_list_sessions_with_metadata`. 
+ """ + return response, metadata + + def pre_update_session( + self, + request: session_service.UpdateSessionRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.UpdateSessionRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for update_session + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_update_session(self, response: gca_session.Session) -> gca_session.Session: + """Post-rpc interceptor for update_session + + DEPRECATED. Please use the `post_update_session_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_update_session` interceptor runs + before the `post_update_session_with_metadata` interceptor. + """ + return response + + def post_update_session_with_metadata( + self, + response: gca_session.Session, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_session.Session, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_session + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_update_session_with_metadata` + interceptor in new development instead of the `post_update_session` interceptor. + When both interceptors are used, this `post_update_session_with_metadata` interceptor runs after the + `post_update_session` interceptor. The (possibly modified) response returned by + `post_update_session` will be passed to + `post_update_session_with_metadata`. 
+ """ + return response, metadata + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.GetLocationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.ListLocationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. 
+ """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. 
+ """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. 
+ """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.WaitOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. 
+ """ + return response + + +@dataclasses.dataclass +class SessionServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: SessionServiceRestInterceptor + + +class SessionServiceRestTransport(_BaseSessionServiceRestTransport): + """REST backend synchronous transport for SessionService. + + The service that manages Vertex Session related resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[SessionServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. + scopes (Optional(Sequence[str])): A list of scopes. 
This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + url_scheme=url_scheme, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or SessionServiceRestInterceptor() + self._prep_wrapped_messages(client_info) + + @property + def operations_client(self) -> operations_v1.AbstractOperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one. 
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", 
+ "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + 
"method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + 
"uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { 
+ "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", 
+ }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, + ], + 
"google.longrunning.Operations.WaitOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) + + # Return the client from cache. 
+ return self._operations_client + + class _AppendEvent( + _BaseSessionServiceRestTransport._BaseAppendEvent, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.AppendEvent") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: session_service.AppendEventRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> session_service.AppendEventResponse: + r"""Call the append event method over HTTP. + + Args: + request (~.session_service.AppendEventRequest): + The request object. Request message for + [SessionService.AppendEvent][google.cloud.aiplatform.v1.SessionService.AppendEvent]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.session_service.AppendEventResponse: + Response message for + [SessionService.AppendEvent][google.cloud.aiplatform.v1.SessionService.AppendEvent]. 
+ + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseAppendEvent._get_http_options() + ) + + request, metadata = self._interceptor.pre_append_event(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseAppendEvent._get_transcoded_request( + http_options, request + ) + + body = _BaseSessionServiceRestTransport._BaseAppendEvent._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseAppendEvent._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.AppendEvent", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "AppendEvent", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._AppendEvent._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = session_service.AppendEventResponse() + pb_resp = session_service.AppendEventResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_append_event(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_append_event_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = session_service.AppendEventResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceClient.append_event", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "AppendEvent", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateSession( + _BaseSessionServiceRestTransport._BaseCreateSession, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.CreateSession") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: session_service.CreateSessionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + 
timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create session method over HTTP. + + Args: + request (~.session_service.CreateSessionRequest): + The request object. Request message for + [SessionService.CreateSession][google.cloud.aiplatform.v1.SessionService.CreateSession]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseCreateSession._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_session(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseCreateSession._get_transcoded_request( + http_options, request + ) + + body = _BaseSessionServiceRestTransport._BaseCreateSession._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseCreateSession._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending 
request for google.cloud.aiplatform_v1.SessionServiceClient.CreateSession", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "CreateSession", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._CreateSession._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_session(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_session_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceClient.create_session", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "CreateSession", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _DeleteSession( + _BaseSessionServiceRestTransport._BaseDeleteSession, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.DeleteSession") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method 
= transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: session_service.DeleteSessionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete session method over HTTP. + + Args: + request (~.session_service.DeleteSessionRequest): + The request object. Request message for + [SessionService.DeleteSession][google.cloud.aiplatform.v1.SessionService.DeleteSession]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseDeleteSession._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_session(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseDeleteSession._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseDeleteSession._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.DeleteSession", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "DeleteSession", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._DeleteSession._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_delete_session(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_session_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceClient.delete_session", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "DeleteSession", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetSession( + _BaseSessionServiceRestTransport._BaseGetSession, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.GetSession") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: session_service.GetSessionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> session.Session: + 
r"""Call the get session method over HTTP. + + Args: + request (~.session_service.GetSessionRequest): + The request object. Request message for + [SessionService.GetSession][google.cloud.aiplatform.v1.SessionService.GetSession]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.session.Session: + A session contains a set of actions + between users and Vertex agents. + + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseGetSession._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_session(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseGetSession._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = ( + _BaseSessionServiceRestTransport._BaseGetSession._get_query_params_json( + transcoded_request + ) + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.GetSession", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetSession", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = 
SessionServiceRestTransport._GetSession._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = session.Session() + pb_resp = session.Session.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_session(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_session_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = session.Session.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceClient.get_session", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetSession", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListEvents( + _BaseSessionServiceRestTransport._BaseListEvents, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.ListEvents") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, 
strict=True), + ) + return response + + def __call__( + self, + request: session_service.ListEventsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> session_service.ListEventsResponse: + r"""Call the list events method over HTTP. + + Args: + request (~.session_service.ListEventsRequest): + The request object. Request message for + [SessionService.ListEvents][google.cloud.aiplatform.v1.SessionService.ListEvents]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.session_service.ListEventsResponse: + Response message for + [SessionService.ListEvents][google.cloud.aiplatform.v1.SessionService.ListEvents]. 
+ + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseListEvents._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_events(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseListEvents._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = ( + _BaseSessionServiceRestTransport._BaseListEvents._get_query_params_json( + transcoded_request + ) + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.ListEvents", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListEvents", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._ListEvents._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = session_service.ListEventsResponse() + pb_resp = session_service.ListEventsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_events(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_events_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = session_service.ListEventsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceClient.list_events", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListEvents", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListSessions( + _BaseSessionServiceRestTransport._BaseListSessions, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.ListSessions") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: session_service.ListSessionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = 
None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> session_service.ListSessionsResponse: + r"""Call the list sessions method over HTTP. + + Args: + request (~.session_service.ListSessionsRequest): + The request object. Request message for + [SessionService.ListSessions][google.cloud.aiplatform.v1.SessionService.ListSessions]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.session_service.ListSessionsResponse: + Response message for + [SessionService.ListSessions][google.cloud.aiplatform.v1.SessionService.ListSessions]. + + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseListSessions._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_sessions(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseListSessions._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseListSessions._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.ListSessions", + extra={ + "serviceName": 
"google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListSessions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._ListSessions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = session_service.ListSessionsResponse() + pb_resp = session_service.ListSessionsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_sessions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_sessions_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = session_service.ListSessionsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceClient.list_sessions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListSessions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _UpdateSession( + _BaseSessionServiceRestTransport._BaseUpdateSession, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.UpdateSession") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = 
transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: session_service.UpdateSessionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gca_session.Session: + r"""Call the update session method over HTTP. + + Args: + request (~.session_service.UpdateSessionRequest): + The request object. Request message for + [SessionService.UpdateSession][google.cloud.aiplatform.v1.SessionService.UpdateSession]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.gca_session.Session: + A session contains a set of actions + between users and Vertex agents. 
+ + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseUpdateSession._get_http_options() + ) + + request, metadata = self._interceptor.pre_update_session(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseUpdateSession._get_transcoded_request( + http_options, request + ) + + body = _BaseSessionServiceRestTransport._BaseUpdateSession._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseUpdateSession._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.UpdateSession", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "UpdateSession", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._UpdateSession._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gca_session.Session() + pb_resp = gca_session.Session.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_update_session(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_session_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gca_session.Session.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceClient.update_session", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "UpdateSession", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + @property + def append_event( + self, + ) -> Callable[ + [session_service.AppendEventRequest], session_service.AppendEventResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._AppendEvent(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_session( + self, + ) -> Callable[[session_service.CreateSessionRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CreateSession(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_session( + self, + ) -> Callable[[session_service.DeleteSessionRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteSession(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_session( + self, + ) -> Callable[[session_service.GetSessionRequest], session.Session]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetSession(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_events( + self, + ) -> Callable[ + [session_service.ListEventsRequest], session_service.ListEventsResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListEvents(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_sessions( + self, + ) -> Callable[ + [session_service.ListSessionsRequest], session_service.ListSessionsResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListSessions(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_session( + self, + ) -> Callable[[session_service.UpdateSessionRequest], gca_session.Session]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._UpdateSession(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation( + _BaseSessionServiceRestTransport._BaseGetLocation, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.GetLocation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseGetLocation._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_location(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseGetLocation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseGetLocation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.GetLocation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetLocation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._GetLocation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = locations_pb2.Location() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_get_location(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.GetLocation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetLocation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations( + _BaseSessionServiceRestTransport._BaseListLocations, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.ListLocations") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Call the list 
locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseListLocations._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseListLocations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseListLocations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.ListLocations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListLocations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._ListLocations._get_response( + self._host, + metadata, + query_params, + 
self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_list_locations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.ListLocations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListLocations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy( + _BaseSessionServiceRestTransport._BaseGetIamPolicy, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.GetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = 
gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseGetIamPolicy._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseGetIamPolicy._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseGetIamPolicy._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.GetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + 
+ # Send the request + response = SessionServiceRestTransport._GetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = policy_pb2.Policy() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_get_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.GetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetIamPolicy", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy( + _BaseSessionServiceRestTransport._BaseSetIamPolicy, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.SetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return 
response + + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseSetIamPolicy._get_http_options() + ) + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseSetIamPolicy._get_transcoded_request( + http_options, request + ) + + body = _BaseSessionServiceRestTransport._BaseSetIamPolicy._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseSetIamPolicy._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for 
google.cloud.aiplatform_v1.SessionServiceClient.SetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._SetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = policy_pb2.Policy() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_set_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.SetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "SetIamPolicy", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions( + _BaseSessionServiceRestTransport._BaseTestIamPermissions, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.TestIamPermissions") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers 
= dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseTestIamPermissions._get_http_options() + ) + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseTestIamPermissions._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseTestIamPermissions._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.TestIamPermissions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._TestIamPermissions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_test_iam_permissions(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.TestIamPermissions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "TestIamPermissions", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation( + _BaseSessionServiceRestTransport._BaseCancelOperation, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.CancelOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + 
r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseCancelOperation._get_http_options() + ) + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseCancelOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseCancelOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.CancelOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "CancelOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._CancelOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + 
transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation( + _BaseSessionServiceRestTransport._BaseDeleteOperation, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.DeleteOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseDeleteOperation._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseDeleteOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseDeleteOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.DeleteOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "DeleteOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._DeleteOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation( + _BaseSessionServiceRestTransport._BaseGetOperation, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.GetOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
+ """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseGetOperation._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseGetOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseGetOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.GetOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._GetOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = operations_pb2.Operation() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_get_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.GetOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations( + _BaseSessionServiceRestTransport._BaseListOperations, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.ListOperations") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Call 
the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseListOperations._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseListOperations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseListOperations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.ListOperations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListOperations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._ListOperations._get_response( + self._host, + 
metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_list_operations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.ListOperations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListOperations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation( + _BaseSessionServiceRestTransport._BaseWaitOperation, SessionServiceRestStub + ): + def __hash__(self): + return hash("SessionServiceRestTransport.WaitOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + 
retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the wait operation method over HTTP. + + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseWaitOperation._get_http_options() + ) + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + transcoded_request = _BaseSessionServiceRestTransport._BaseWaitOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseWaitOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.WaitOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "WaitOperation", + "httpRequest": http_request, + 
"metadata": http_request["headers"], + }, + ) + + # Send the request + response = SessionServiceRestTransport._WaitOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = operations_pb2.Operation() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_wait_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.WaitOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "WaitOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("SessionServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1/services/session_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/session_service/transports/rest_asyncio.py new file mode 100644 index 0000000000..4fecd41db7 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/session_service/transports/rest_asyncio.py @@ -0,0 +1,5554 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import google.auth + +try: + import aiohttp # type: ignore + from google.auth.aio.transport.sessions import AsyncAuthorizedSession # type: ignore + from google.api_core import rest_streaming_async # type: ignore + from google.api_core.operations_v1 import AsyncOperationsRestClient # type: ignore +except ImportError as e: # pragma: NO COVER + raise ImportError( + "`rest_asyncio` transport requires the library to be installed with the `async_rest` extra. Install the library with the `async_rest` extra using `pip install google-cloud-aiplatform[async_rest]`" + ) from e + +from google.auth.aio import credentials as ga_credentials_async # type: ignore + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import operations_v1 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from google.api_core import retry_async as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming_async # type: ignore +import google.protobuf + +from google.protobuf import json_format +from google.api_core import operations_v1 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore + +import json # type: ignore +import dataclasses +from typing import Any, Dict, List, Callable, Tuple, Optional, Sequence, Union + + +from 
google.cloud.aiplatform_v1.types import session +from google.cloud.aiplatform_v1.types import session as gca_session +from google.cloud.aiplatform_v1.types import session_service +from google.longrunning import operations_pb2 # type: ignore + + +from .rest_base import _BaseSessionServiceRestTransport + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +import logging + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=f"google-auth@{google.auth.__version__}", +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +class AsyncSessionServiceRestInterceptor: + """Asynchronous Interceptor for SessionService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the AsyncSessionServiceRestTransport. + + .. 
code-block:: python + class MyCustomSessionServiceInterceptor(AsyncSessionServiceRestInterceptor): + async def pre_append_event(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_append_event(self, response): + logging.log(f"Received response: {response}") + return response + + async def pre_create_session(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_create_session(self, response): + logging.log(f"Received response: {response}") + return response + + async def pre_delete_session(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_delete_session(self, response): + logging.log(f"Received response: {response}") + return response + + async def pre_get_session(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_get_session(self, response): + logging.log(f"Received response: {response}") + return response + + async def pre_list_events(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_list_events(self, response): + logging.log(f"Received response: {response}") + return response + + async def pre_list_sessions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_list_sessions(self, response): + logging.log(f"Received response: {response}") + return response + + async def pre_update_session(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_update_session(self, response): + logging.log(f"Received response: {response}") + return response + + transport = AsyncSessionServiceRestTransport(interceptor=MyCustomSessionServiceInterceptor()) + client = SessionServiceAsyncClient(transport=transport) + + + """ + + async def 
pre_append_event( + self, + request: session_service.AppendEventRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.AppendEventRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for append_event + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_append_event( + self, response: session_service.AppendEventResponse + ) -> session_service.AppendEventResponse: + """Post-rpc interceptor for append_event + + DEPRECATED. Please use the `post_append_event_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_append_event` interceptor runs + before the `post_append_event_with_metadata` interceptor. + """ + return response + + async def post_append_event_with_metadata( + self, + response: session_service.AppendEventResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.AppendEventResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for append_event + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_append_event_with_metadata` + interceptor in new development instead of the `post_append_event` interceptor. + When both interceptors are used, this `post_append_event_with_metadata` interceptor runs after the + `post_append_event` interceptor. The (possibly modified) response returned by + `post_append_event` will be passed to + `post_append_event_with_metadata`. 
+ """ + return response, metadata + + async def pre_create_session( + self, + request: session_service.CreateSessionRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.CreateSessionRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for create_session + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_create_session( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_session + + DEPRECATED. Please use the `post_create_session_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_create_session` interceptor runs + before the `post_create_session_with_metadata` interceptor. + """ + return response + + async def post_create_session_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_session + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_create_session_with_metadata` + interceptor in new development instead of the `post_create_session` interceptor. + When both interceptors are used, this `post_create_session_with_metadata` interceptor runs after the + `post_create_session` interceptor. The (possibly modified) response returned by + `post_create_session` will be passed to + `post_create_session_with_metadata`. 
+ """ + return response, metadata + + async def pre_delete_session( + self, + request: session_service.DeleteSessionRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.DeleteSessionRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for delete_session + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_delete_session( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_session + + DEPRECATED. Please use the `post_delete_session_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_delete_session` interceptor runs + before the `post_delete_session_with_metadata` interceptor. + """ + return response + + async def post_delete_session_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_session + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_delete_session_with_metadata` + interceptor in new development instead of the `post_delete_session` interceptor. + When both interceptors are used, this `post_delete_session_with_metadata` interceptor runs after the + `post_delete_session` interceptor. The (possibly modified) response returned by + `post_delete_session` will be passed to + `post_delete_session_with_metadata`. 
+ """ + return response, metadata + + async def pre_get_session( + self, + request: session_service.GetSessionRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.GetSessionRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_session + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_get_session(self, response: session.Session) -> session.Session: + """Post-rpc interceptor for get_session + + DEPRECATED. Please use the `post_get_session_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_get_session` interceptor runs + before the `post_get_session_with_metadata` interceptor. + """ + return response + + async def post_get_session_with_metadata( + self, + response: session.Session, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[session.Session, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_session + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_get_session_with_metadata` + interceptor in new development instead of the `post_get_session` interceptor. + When both interceptors are used, this `post_get_session_with_metadata` interceptor runs after the + `post_get_session` interceptor. The (possibly modified) response returned by + `post_get_session` will be passed to + `post_get_session_with_metadata`. 
+ """ + return response, metadata + + async def pre_list_events( + self, + request: session_service.ListEventsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.ListEventsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_events + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_list_events( + self, response: session_service.ListEventsResponse + ) -> session_service.ListEventsResponse: + """Post-rpc interceptor for list_events + + DEPRECATED. Please use the `post_list_events_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_list_events` interceptor runs + before the `post_list_events_with_metadata` interceptor. + """ + return response + + async def post_list_events_with_metadata( + self, + response: session_service.ListEventsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.ListEventsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_events + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_list_events_with_metadata` + interceptor in new development instead of the `post_list_events` interceptor. + When both interceptors are used, this `post_list_events_with_metadata` interceptor runs after the + `post_list_events` interceptor. The (possibly modified) response returned by + `post_list_events` will be passed to + `post_list_events_with_metadata`. 
+ """ + return response, metadata + + async def pre_list_sessions( + self, + request: session_service.ListSessionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.ListSessionsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_sessions + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_list_sessions( + self, response: session_service.ListSessionsResponse + ) -> session_service.ListSessionsResponse: + """Post-rpc interceptor for list_sessions + + DEPRECATED. Please use the `post_list_sessions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_list_sessions` interceptor runs + before the `post_list_sessions_with_metadata` interceptor. + """ + return response + + async def post_list_sessions_with_metadata( + self, + response: session_service.ListSessionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.ListSessionsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_sessions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_list_sessions_with_metadata` + interceptor in new development instead of the `post_list_sessions` interceptor. + When both interceptors are used, this `post_list_sessions_with_metadata` interceptor runs after the + `post_list_sessions` interceptor. The (possibly modified) response returned by + `post_list_sessions` will be passed to + `post_list_sessions_with_metadata`. 
+ """ + return response, metadata + + async def pre_update_session( + self, + request: session_service.UpdateSessionRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + session_service.UpdateSessionRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for update_session + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_update_session( + self, response: gca_session.Session + ) -> gca_session.Session: + """Post-rpc interceptor for update_session + + DEPRECATED. Please use the `post_update_session_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. This `post_update_session` interceptor runs + before the `post_update_session_with_metadata` interceptor. + """ + return response + + async def post_update_session_with_metadata( + self, + response: gca_session.Session, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_session.Session, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_session + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the SessionService server but before it is returned to user code. + + We recommend only using this `post_update_session_with_metadata` + interceptor in new development instead of the `post_update_session` interceptor. + When both interceptors are used, this `post_update_session_with_metadata` interceptor runs after the + `post_update_session` interceptor. The (possibly modified) response returned by + `post_update_session` will be passed to + `post_update_session_with_metadata`. 
+ """ + return response, metadata + + async def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.GetLocationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + async def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.ListLocationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + async def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. 
+ """ + return request, metadata + + async def post_get_iam_policy( + self, response: policy_pb2.Policy + ) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + async def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_set_iam_policy( + self, response: policy_pb2.Policy + ) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + async def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. 
+ """ + return response + + async def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + async def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + async def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. 
+ """ + return request, metadata + + async def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + async def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. + """ + return response + + async def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.WaitOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the SessionService server. + """ + return request, metadata + + async def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the SessionService server but before + it is returned to user code. 
+ """ + return response + + +@dataclasses.dataclass +class AsyncSessionServiceRestStub: + _session: AsyncAuthorizedSession + _host: str + _interceptor: AsyncSessionServiceRestInterceptor + + +class AsyncSessionServiceRestTransport(_BaseSessionServiceRestTransport): + """Asynchronous REST backend transport for SessionService. + + The service that manages Vertex Session related resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials_async.Credentials] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + url_scheme: str = "https", + interceptor: Optional[AsyncSessionServiceRestInterceptor] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.aio.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + url_scheme (str): the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=False, + url_scheme=url_scheme, + api_audience=None, + ) + self._session = AsyncAuthorizedSession(self._credentials) # type: ignore + self._interceptor = interceptor or AsyncSessionServiceRestInterceptor() + self._wrap_with_kind = True + self._prep_wrapped_messages(client_info) + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) + + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.create_session: self._wrap_method( + self.create_session, + default_timeout=None, + client_info=client_info, + ), + self.get_session: self._wrap_method( + self.get_session, + default_timeout=None, + client_info=client_info, + ), + self.list_sessions: self._wrap_method( + self.list_sessions, + default_timeout=None, + client_info=client_info, + ), + self.update_session: self._wrap_method( + self.update_session, + default_timeout=None, + client_info=client_info, + ), + self.delete_session: self._wrap_method( + self.delete_session, + default_timeout=None, + client_info=client_info, + ), + self.list_events: self._wrap_method( + self.list_events, + default_timeout=None, + client_info=client_info, + ), + self.append_event: self._wrap_method( + self.append_event, + default_timeout=None, + client_info=client_info, + ), + self.get_location: self._wrap_method( + self.get_location, + default_timeout=None, + client_info=client_info, + ), + self.list_locations: self._wrap_method( + self.list_locations, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: self._wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: self._wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + 
), + self.test_iam_permissions: self._wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + self.cancel_operation: self._wrap_method( + self.cancel_operation, + default_timeout=None, + client_info=client_info, + ), + self.delete_operation: self._wrap_method( + self.delete_operation, + default_timeout=None, + client_info=client_info, + ), + self.get_operation: self._wrap_method( + self.get_operation, + default_timeout=None, + client_info=client_info, + ), + self.list_operations: self._wrap_method( + self.list_operations, + default_timeout=None, + client_info=client_info, + ), + self.wait_operation: self._wrap_method( + self.wait_operation, + default_timeout=None, + client_info=client_info, + ), + } + + def _wrap_method(self, func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + + class _AppendEvent( + _BaseSessionServiceRestTransport._BaseAppendEvent, AsyncSessionServiceRestStub + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.AppendEvent") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + async def __call__( + self, + request: session_service.AppendEventRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> session_service.AppendEventResponse: + r"""Call the append event method over HTTP. 
+ + Args: + request (~.session_service.AppendEventRequest): + The request object. Request message for + [SessionService.AppendEvent][google.cloud.aiplatform.v1.SessionService.AppendEvent]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.session_service.AppendEventResponse: + Response message for + [SessionService.AppendEvent][google.cloud.aiplatform.v1.SessionService.AppendEvent]. + + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseAppendEvent._get_http_options() + ) + + request, metadata = await self._interceptor.pre_append_event( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseAppendEvent._get_transcoded_request( + http_options, request + ) + + body = _BaseSessionServiceRestTransport._BaseAppendEvent._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseAppendEvent._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.AppendEvent", + extra={ + "serviceName": 
"google.cloud.aiplatform.v1.SessionService", + "rpcName": "AppendEvent", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._AppendEvent._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = session_service.AppendEventResponse() + pb_resp = session_service.AppendEventResponse.pb(resp) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_append_event(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_append_event_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = session_service.AppendEventResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.append_event", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "AppendEvent", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + class _CreateSession( + 
_BaseSessionServiceRestTransport._BaseCreateSession, AsyncSessionServiceRestStub + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.CreateSession") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + async def __call__( + self, + request: session_service.CreateSessionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create session method over HTTP. + + Args: + request (~.session_service.CreateSessionRequest): + The request object. Request message for + [SessionService.CreateSession][google.cloud.aiplatform.v1.SessionService.CreateSession]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseCreateSession._get_http_options() + ) + + request, metadata = await self._interceptor.pre_create_session( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseCreateSession._get_transcoded_request( + http_options, request + ) + + body = _BaseSessionServiceRestTransport._BaseCreateSession._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseCreateSession._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.CreateSession", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "CreateSession", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._CreateSession._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = operations_pb2.Operation() + pb_resp = resp + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_create_session(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_session_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.create_session", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "CreateSession", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + class _DeleteSession( + _BaseSessionServiceRestTransport._BaseDeleteSession, AsyncSessionServiceRestStub + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.DeleteSession") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + 
timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: session_service.DeleteSessionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete session method over HTTP. + + Args: + request (~.session_service.DeleteSessionRequest): + The request object. Request message for + [SessionService.DeleteSession][google.cloud.aiplatform.v1.SessionService.DeleteSession]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseDeleteSession._get_http_options() + ) + + request, metadata = await self._interceptor.pre_delete_session( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseDeleteSession._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseDeleteSession._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.DeleteSession", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "DeleteSession", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._DeleteSession._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = operations_pb2.Operation() + pb_resp = resp + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_delete_session(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_session_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.delete_session", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "DeleteSession", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + class _GetSession( + _BaseSessionServiceRestTransport._BaseGetSession, AsyncSessionServiceRestStub + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.GetSession") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + 
timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: session_service.GetSessionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> session.Session: + r"""Call the get session method over HTTP. + + Args: + request (~.session_service.GetSessionRequest): + The request object. Request message for + [SessionService.GetSession][google.cloud.aiplatform.v1.SessionService.GetSession]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.session.Session: + A session contains a set of actions + between users and Vertex agents. 
+ + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseGetSession._get_http_options() + ) + + request, metadata = await self._interceptor.pre_get_session( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseGetSession._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = ( + _BaseSessionServiceRestTransport._BaseGetSession._get_query_params_json( + transcoded_request + ) + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.GetSession", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetSession", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncSessionServiceRestTransport._GetSession._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = session.Session() + pb_resp = session.Session.pb(resp) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_get_session(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_session_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = session.Session.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.get_session", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetSession", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + class _ListEvents( + _BaseSessionServiceRestTransport._BaseListEvents, AsyncSessionServiceRestStub + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.ListEvents") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + 
timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: session_service.ListEventsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> session_service.ListEventsResponse: + r"""Call the list events method over HTTP. + + Args: + request (~.session_service.ListEventsRequest): + The request object. Request message for + [SessionService.ListEvents][google.cloud.aiplatform.v1.SessionService.ListEvents]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.session_service.ListEventsResponse: + Response message for + [SessionService.ListEvents][google.cloud.aiplatform.v1.SessionService.ListEvents]. 
+ + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseListEvents._get_http_options() + ) + + request, metadata = await self._interceptor.pre_list_events( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseListEvents._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = ( + _BaseSessionServiceRestTransport._BaseListEvents._get_query_params_json( + transcoded_request + ) + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.ListEvents", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListEvents", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncSessionServiceRestTransport._ListEvents._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = session_service.ListEventsResponse() + pb_resp = session_service.ListEventsResponse.pb(resp) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_list_events(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_events_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = session_service.ListEventsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.list_events", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListEvents", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + class _ListSessions( + _BaseSessionServiceRestTransport._BaseListSessions, AsyncSessionServiceRestStub + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.ListSessions") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, 
method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: session_service.ListSessionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> session_service.ListSessionsResponse: + r"""Call the list sessions method over HTTP. + + Args: + request (~.session_service.ListSessionsRequest): + The request object. Request message for + [SessionService.ListSessions][google.cloud.aiplatform.v1.SessionService.ListSessions]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.session_service.ListSessionsResponse: + Response message for + [SessionService.ListSessions][google.cloud.aiplatform.v1.SessionService.ListSessions]. 
+ + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseListSessions._get_http_options() + ) + + request, metadata = await self._interceptor.pre_list_sessions( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseListSessions._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseListSessions._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.ListSessions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListSessions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._ListSessions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = session_service.ListSessionsResponse() + pb_resp = session_service.ListSessionsResponse.pb(resp) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_list_sessions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_sessions_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = session_service.ListSessionsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.list_sessions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListSessions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + class _UpdateSession( + _BaseSessionServiceRestTransport._BaseUpdateSession, AsyncSessionServiceRestStub + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.UpdateSession") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await 
getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + async def __call__( + self, + request: session_service.UpdateSessionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gca_session.Session: + r"""Call the update session method over HTTP. + + Args: + request (~.session_service.UpdateSessionRequest): + The request object. Request message for + [SessionService.UpdateSession][google.cloud.aiplatform.v1.SessionService.UpdateSession]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.gca_session.Session: + A session contains a set of actions + between users and Vertex agents. 
+ + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseUpdateSession._get_http_options() + ) + + request, metadata = await self._interceptor.pre_update_session( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseUpdateSession._get_transcoded_request( + http_options, request + ) + + body = _BaseSessionServiceRestTransport._BaseUpdateSession._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseUpdateSession._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.UpdateSession", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "UpdateSession", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._UpdateSession._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = gca_session.Session() + pb_resp = gca_session.Session.pb(resp) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_update_session(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_session_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gca_session.Session.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.update_session", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "UpdateSession", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + @property + def operations_client(self) -> AsyncOperationsRestClient: + """Create the async client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one. 
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", 
+ "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + 
"method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + 
"uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { 
+ "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", 
+ }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, + ], + 
"google.longrunning.Operations.WaitOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, + ], + } + + rest_transport = operations_v1.AsyncOperationsRestTransport( # type: ignore + host=self._host, + # use the credentials which are saved + credentials=self._credentials, # type: ignore + http_options=http_options, + path_prefix="v1", + ) + + self._operations_client = AsyncOperationsRestClient( + transport=rest_transport + ) + + # Return the client from cache. 
+ return self._operations_client + + @property + def append_event( + self, + ) -> Callable[ + [session_service.AppendEventRequest], session_service.AppendEventResponse + ]: + return self._AppendEvent(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_session( + self, + ) -> Callable[[session_service.CreateSessionRequest], operations_pb2.Operation]: + return self._CreateSession(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_session( + self, + ) -> Callable[[session_service.DeleteSessionRequest], operations_pb2.Operation]: + return self._DeleteSession(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_session( + self, + ) -> Callable[[session_service.GetSessionRequest], session.Session]: + return self._GetSession(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_events( + self, + ) -> Callable[ + [session_service.ListEventsRequest], session_service.ListEventsResponse + ]: + return self._ListEvents(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_sessions( + self, + ) -> Callable[ + [session_service.ListSessionsRequest], session_service.ListSessionsResponse + ]: + return self._ListSessions(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_session( + self, + ) -> Callable[[session_service.UpdateSessionRequest], gca_session.Session]: + return self._UpdateSession(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation( + _BaseSessionServiceRestTransport._BaseGetLocation, AsyncSessionServiceRestStub + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.GetLocation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + 
transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseGetLocation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_get_location( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseGetLocation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseGetLocation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.GetLocation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetLocation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._GetLocation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = locations_pb2.Location() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_get_location(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.GetLocation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetLocation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations( + _BaseSessionServiceRestTransport._BaseListLocations, AsyncSessionServiceRestStub + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.ListLocations") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async 
def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseListLocations._get_http_options() + ) + + request, metadata = await self._interceptor.pre_list_locations( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseListLocations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseListLocations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.ListLocations", + extra={ + 
"serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListLocations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._ListLocations._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_list_locations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.ListLocations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListLocations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy( + _BaseSessionServiceRestTransport._BaseGetIamPolicy, AsyncSessionServiceRestStub + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.GetIamPolicy") + + @staticmethod + 
async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. 
+ """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseGetIamPolicy._get_http_options() + ) + + request, metadata = await self._interceptor.pre_get_iam_policy( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseGetIamPolicy._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseGetIamPolicy._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.GetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._GetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = policy_pb2.Policy() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_get_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.GetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetIamPolicy", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy( + _BaseSessionServiceRestTransport._BaseSetIamPolicy, AsyncSessionServiceRestStub + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.SetIamPolicy") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + 
+ async def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseSetIamPolicy._get_http_options() + ) + + request, metadata = await self._interceptor.pre_set_iam_policy( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseSetIamPolicy._get_transcoded_request( + http_options, request + ) + + body = _BaseSessionServiceRestTransport._BaseSetIamPolicy._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseSetIamPolicy._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for 
google.cloud.aiplatform_v1.SessionServiceClient.SetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._SetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = policy_pb2.Policy() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_set_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.SetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "SetIamPolicy", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions( + _BaseSessionServiceRestTransport._BaseTestIamPermissions, + AsyncSessionServiceRestStub, + ): + def __hash__(self): 
+ return hash("AsyncSessionServiceRestTransport.TestIamPermissions") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseTestIamPermissions._get_http_options() + ) + + request, metadata = await self._interceptor.pre_test_iam_permissions( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseTestIamPermissions._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseTestIamPermissions._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.TestIamPermissions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncSessionServiceRestTransport._TestIamPermissions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_test_iam_permissions(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.TestIamPermissions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "TestIamPermissions", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation( + _BaseSessionServiceRestTransport._BaseCancelOperation, + AsyncSessionServiceRestStub, + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.CancelOperation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + 
params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseCancelOperation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_cancel_operation( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseCancelOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseCancelOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.CancelOperation", + extra={ + 
"serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "CancelOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._CancelOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + return await self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation( + _BaseSessionServiceRestTransport._BaseDeleteOperation, + AsyncSessionServiceRestStub, + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.DeleteOperation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> 
None: + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseDeleteOperation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_delete_operation( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseDeleteOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseDeleteOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.DeleteOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "DeleteOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._DeleteOperation._get_response( + self._host, + metadata, + query_params, + 
self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + return await self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation( + _BaseSessionServiceRestTransport._BaseGetOperation, AsyncSessionServiceRestStub + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.GetOperation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.Operation: Response from GetOperation method. + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseGetOperation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_get_operation( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseGetOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseGetOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.GetOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._GetOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = operations_pb2.Operation() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_get_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.GetOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "GetOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations( + _BaseSessionServiceRestTransport._BaseListOperations, + AsyncSessionServiceRestStub, + ): + def __hash__(self): + return hash("AsyncSessionServiceRestTransport.ListOperations") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return 
response + + async def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. + """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseListOperations._get_http_options() + ) + + request, metadata = await self._interceptor.pre_list_operations( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseListOperations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseListOperations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for 
google.cloud.aiplatform_v1.SessionServiceClient.ListOperations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListOperations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._ListOperations._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_list_operations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.ListOperations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "ListOperations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation( + _BaseSessionServiceRestTransport._BaseWaitOperation, AsyncSessionServiceRestStub + ): + def __hash__(self): 
+ return hash("AsyncSessionServiceRestTransport.WaitOperation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the wait operation method over HTTP. + + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. 
+ """ + + http_options = ( + _BaseSessionServiceRestTransport._BaseWaitOperation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_wait_operation( + request, metadata + ) + transcoded_request = _BaseSessionServiceRestTransport._BaseWaitOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseSessionServiceRestTransport._BaseWaitOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.SessionServiceClient.WaitOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "WaitOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncSessionServiceRestTransport._WaitOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = operations_pb2.Operation() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_wait_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.SessionServiceAsyncClient.WaitOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.SessionService", + "rpcName": "WaitOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def kind(self) -> str: + return "rest_asyncio" + + async def close(self): + await self._session.close() diff --git a/google/cloud/aiplatform_v1/services/session_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/session_service/transports/rest_base.py new file mode 100644 index 0000000000..f33175d298 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/session_service/transports/rest_base.py @@ -0,0 +1,2831 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import json # type: ignore +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from .base import SessionServiceTransport, DEFAULT_CLIENT_INFO + +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + + +from google.cloud.aiplatform_v1.types import session +from google.cloud.aiplatform_v1.types import session as gca_session +from google.cloud.aiplatform_v1.types import session_service +from google.longrunning import operations_pb2 # type: ignore + + +class _BaseSessionServiceRestTransport(SessionServiceTransport): + """Base REST backend transport for SessionService. + + Note: This class is not meant to be used directly. Use its sync and + async sub-classes instead. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[Any] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. 
+ Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[Any]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + class _BaseAppendEvent: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}:appendEvent", + "body": "event", + }, + ] + return
http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = session_service.AppendEventRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseSessionServiceRestTransport._BaseAppendEvent._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateSession: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{parent=projects/*/locations/*/reasoningEngines/*}/sessions", + "body": "session", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = session_service.CreateSessionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params 
= json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseSessionServiceRestTransport._BaseCreateSession._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteSession: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = session_service.DeleteSessionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseSessionServiceRestTransport._BaseDeleteSession._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetSession: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] 
= [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = session_service.GetSessionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseSessionServiceRestTransport._BaseGetSession._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListEvents: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*/reasoningEngines/*/sessions/*}/events", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = session_service.ListEventsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseSessionServiceRestTransport._BaseListEvents._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + 
+ class _BaseListSessions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*/reasoningEngines/*}/sessions", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = session_service.ListSessionsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseSessionServiceRestTransport._BaseListSessions._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateSession: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1/{session.name=projects/*/locations/*/reasoningEngines/*/sessions/*}", + "body": "session", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = session_service.UpdateSessionRequest.pb(request) + transcoded_request = 
path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseSessionServiceRestTransport._BaseUpdateSession._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetLocation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseListLocations: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*}/locations", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) 
+ transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseGetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": 
"/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseSetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": 
"*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + body = json.dumps(transcoded_request["body"]) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseTestIamPermissions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": 
"/v1/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseCancelOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/agents/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseDeleteOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + 
"method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseGetOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", 
+ }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseListOperations: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = 
json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseWaitOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + 
"uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = 
path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + +__all__ = ("_BaseSessionServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py index 1f781cc082..1a8512051b 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers from google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.aiplatform_v1.types import specialist_pool @@ -56,8 +54,10 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport from .client import SpecialistPoolServiceClient @@ -136,7 +136,10 @@ def from_service_account_info(cls, info: dict, 
*args, **kwargs): Returns: SpecialistPoolServiceAsyncClient: The constructed client. """ - return SpecialistPoolServiceClient.from_service_account_info.__func__(SpecialistPoolServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + SpecialistPoolServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(SpecialistPoolServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -152,7 +155,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: SpecialistPoolServiceAsyncClient: The constructed client. """ - return SpecialistPoolServiceClient.from_service_account_file.__func__(SpecialistPoolServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + SpecialistPoolServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(SpecialistPoolServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py index e19c4eb33e..1a6f5e7585 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers from google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.aiplatform_v1.types import specialist_pool @@ -72,8 +70,10 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf 
import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import SpecialistPoolServiceGrpcTransport from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py index 798be2aa5a..4d55620222 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py @@ -89,8 +89,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -105,11 +103,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest.py index c4a355cff5..77b46bc12b 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest.py @@ -1042,6 +1042,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1082,6 +1086,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1408,6 +1416,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1468,6 +1480,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ 
-1806,6 +1822,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1866,6 +1886,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2188,6 +2212,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2260,6 +2288,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2590,6 +2622,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2650,6 +2686,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", 
"uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2758,7 +2798,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2911,7 +2951,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3388,7 +3428,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest_asyncio.py index 83ded27d45..faa8579910 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest_asyncio.py @@ -886,7 +886,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1045,7 +1045,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1536,7 +1536,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: 
request_payload = None http_request = { @@ -1938,6 +1938,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1978,6 +1982,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2304,6 +2312,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2364,6 +2376,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2702,6 +2718,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2762,6 +2782,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3084,6 +3108,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3156,6 +3184,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3486,6 +3518,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3546,6 +3582,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest_base.py index 01c049badb..8f43b9a26f 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest_base.py +++ 
b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest_base.py @@ -986,6 +986,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1026,6 +1030,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1371,6 +1379,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1431,6 +1443,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1788,6 +1804,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1848,6 +1868,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] 
return http_options @@ -2189,6 +2213,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2261,6 +2289,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2610,6 +2642,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2670,6 +2706,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py index a2e74b1084..5d6b6ca8e9 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py @@ -47,8 +47,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.tensorboard_service import pagers from 
google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -70,9 +68,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport from .client import TensorboardServiceClient @@ -157,7 +157,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: TensorboardServiceAsyncClient: The constructed client. """ - return TensorboardServiceClient.from_service_account_info.__func__(TensorboardServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + TensorboardServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(TensorboardServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -173,7 +176,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: TensorboardServiceAsyncClient: The constructed client. 
""" - return TensorboardServiceClient.from_service_account_file.__func__(TensorboardServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + TensorboardServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(TensorboardServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/client.py index 63e8b7f29e..d492ebd0c5 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/client.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/client.py @@ -62,8 +62,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.tensorboard_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -85,9 +83,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import TensorboardServiceGrpcTransport from .transports.grpc_asyncio import 
TensorboardServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py index b14dcde411..0454a91bbb 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py @@ -102,8 +102,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -118,11 +116,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py index a76d03ccc7..7b9382d913 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py @@ -2532,6 +2532,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2572,6 +2576,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2898,6 +2906,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2958,6 +2970,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -3296,6 +3312,10 @@ 
def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3356,6 +3376,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3678,6 +3702,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3750,6 +3778,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -4080,6 +4112,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4140,6 +4176,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -4742,7 +4782,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5386,7 +5426,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5537,7 +5577,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5689,7 +5729,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5842,7 +5882,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -8028,7 +8068,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_asyncio.py index 708ec9bbbe..2d85c8f014 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_asyncio.py @@ 
-3022,7 +3022,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3698,7 +3698,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3861,7 +3861,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4021,7 +4021,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4184,7 +4184,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6469,7 +6469,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -7732,6 +7732,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -7772,6 +7776,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -8098,6 +8106,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -8158,6 +8170,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -8496,6 +8512,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -8556,6 +8576,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -8878,6 +8902,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -8950,6 +8978,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -9280,6 +9312,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -9340,6 +9376,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_base.py index 842c2edfcb..bf5a34a2f4 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_base.py @@ -2323,6 +2323,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2363,6 +2367,10 @@ def _get_http_options(): 
"method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -2708,6 +2716,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2768,6 +2780,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -3125,6 +3141,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3185,6 +3205,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -3526,6 +3550,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3598,6 +3626,10 @@ def _get_http_options(): "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -3947,6 +3979,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4007,6 +4043,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/async_client.py b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/async_client.py index 555449aa61..92681232f1 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.vertex_rag_data_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import io @@ -57,9 +55,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # 
type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import VertexRagDataServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import VertexRagDataServiceGrpcAsyncIOTransport from .client import VertexRagDataServiceClient @@ -144,7 +144,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: VertexRagDataServiceAsyncClient: The constructed client. """ - return VertexRagDataServiceClient.from_service_account_info.__func__(VertexRagDataServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + VertexRagDataServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(VertexRagDataServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -160,7 +163,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: VertexRagDataServiceAsyncClient: The constructed client. 
""" - return VertexRagDataServiceClient.from_service_account_file.__func__(VertexRagDataServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + VertexRagDataServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(VertexRagDataServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/client.py b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/client.py index 51264e5e9c..d5b30bb8b5 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/client.py +++ b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.vertex_rag_data_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import io @@ -73,9 +71,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import VertexRagDataServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import VertexRagDataServiceGrpcTransport from .transports.grpc_asyncio import 
VertexRagDataServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/base.py b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/base.py index 79b9750011..e4eee4a8ee 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/base.py @@ -89,8 +89,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -105,11 +103,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest.py b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest.py index 58602cb228..f3ed86ebae 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest.py @@ -1444,6 +1444,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1484,6 +1488,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1810,6 +1818,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1870,6 +1882,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ 
-2208,6 +2224,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2268,6 +2288,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2590,6 +2614,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2662,6 +2690,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2992,6 +3024,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3052,6 +3088,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", 
"uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3160,7 +3200,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3311,7 +3351,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3459,7 +3499,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4064,7 +4104,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4524,7 +4564,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4680,7 +4720,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest_asyncio.py index e3a9b5a35b..0e7fb54bda 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest_asyncio.py +++ 
b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest_asyncio.py @@ -1323,7 +1323,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1482,7 +1482,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1640,7 +1640,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2275,7 +2275,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2757,7 +2757,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2921,7 +2921,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3490,6 +3490,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3530,6 +3534,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -3856,6 +3864,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3916,6 +3928,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -4254,6 +4270,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4314,6 +4334,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -4636,6 +4660,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -4708,6 +4736,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -5038,6 +5070,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5098,6 +5134,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest_base.py index fd4a419037..983fb0f255 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest_base.py @@ -1345,6 +1345,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1385,6 +1389,10 @@ def 
_get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1730,6 +1738,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1790,6 +1802,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2147,6 +2163,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2207,6 +2227,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2548,6 +2572,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2620,6 +2648,10 @@ def _get_http_options(): "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2969,6 +3001,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3029,6 +3065,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_service/async_client.py b/google/cloud/aiplatform_v1/services/vertex_rag_service/async_client.py index 6379b7a254..5456d0c30a 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/vertex_rag_service/async_client.py @@ -119,7 +119,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: VertexRagServiceAsyncClient: The constructed client. """ - return VertexRagServiceClient.from_service_account_info.__func__(VertexRagServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + VertexRagServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(VertexRagServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -135,7 +138,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: VertexRagServiceAsyncClient: The constructed client. 
""" - return VertexRagServiceClient.from_service_account_file.__func__(VertexRagServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + VertexRagServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(VertexRagServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_service/transports/base.py b/google/cloud/aiplatform_v1/services/vertex_rag_service/transports/base.py index c876edee3a..ab8c4abadb 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/vertex_rag_service/transports/base.py @@ -87,8 +87,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -103,11 +101,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/vertex_rag_service/transports/rest_base.py index 5acba141c5..2db4ab41e1 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/vertex_rag_service/transports/rest_base.py @@ -899,6 +899,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -939,6 +943,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1284,6 +1292,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1344,6 +1356,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1701,6 +1717,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, 
{ "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1761,6 +1781,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2102,6 +2126,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2174,6 +2202,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2523,6 +2555,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2583,6 +2619,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py index ff6b8c5b96..ccbbcdca32 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO 
COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.vizier_service import pagers from google.cloud.aiplatform_v1.types import study from google.cloud.aiplatform_v1.types import study as gca_study @@ -55,7 +53,9 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport from .client import VizierServiceClient @@ -131,7 +131,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: VizierServiceAsyncClient: The constructed client. """ - return VizierServiceClient.from_service_account_info.__func__(VizierServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + VizierServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(VizierServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -147,7 +150,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: VizierServiceAsyncClient: The constructed client. 
""" - return VizierServiceClient.from_service_account_file.__func__(VizierServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + VizierServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(VizierServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1/services/vizier_service/client.py b/google/cloud/aiplatform_v1/services/vizier_service/client.py index c80c043566..8fbd0780a5 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.vizier_service import pagers from google.cloud.aiplatform_v1.types import study from google.cloud.aiplatform_v1.types import study as gca_study @@ -71,7 +69,9 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import VizierServiceGrpcTransport from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py index d6ba7a88c8..73c991c508 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py +++ 
b/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py @@ -35,7 +35,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ @@ -91,8 +91,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -107,11 +105,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py index 9c5c98f3f6..140b876c90 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py @@ -38,7 +38,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO try: diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py index b4e05094f3..110d7f34fb 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py @@ -41,7 +41,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO from .grpc import VizierServiceGrpcTransport diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py index f2e156ef2f..ba2ec3248a 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py @@ -40,7 +40,7 @@ from google.cloud.aiplatform_v1.types import study from google.cloud.aiplatform_v1.types 
import study as gca_study from google.cloud.aiplatform_v1.types import vizier_service -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -1494,6 +1494,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1534,6 +1538,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1860,6 +1868,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1920,6 +1932,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2258,6 +2274,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2318,6 +2338,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2640,6 +2664,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2712,6 +2740,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3042,6 +3074,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3102,6 +3138,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + 
"uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3370,7 +3410,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3991,7 +4031,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4101,7 +4141,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5309,7 +5349,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest_asyncio.py index 024f75a244..3d3a76bc74 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest_asyncio.py @@ -53,7 +53,7 @@ from google.cloud.aiplatform_v1.types import study from google.cloud.aiplatform_v1.types import study as gca_study from google.cloud.aiplatform_v1.types import vizier_service -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -1557,7 +1557,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None 
http_request = { @@ -2212,7 +2212,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2330,7 +2330,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3599,7 +3599,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4003,6 +4003,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4043,6 +4047,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -4369,6 +4377,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4429,6 +4441,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": 
"delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -4767,6 +4783,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4827,6 +4847,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -5149,6 +5173,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -5221,6 +5249,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -5551,6 +5583,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": 
"post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5611,6 +5647,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest_base.py index d77459d301..71c0832a58 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest_base.py @@ -30,7 +30,7 @@ from google.cloud.aiplatform_v1.types import study from google.cloud.aiplatform_v1.types import study as gca_study from google.cloud.aiplatform_v1.types import vizier_service -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -1526,6 +1526,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1566,6 +1570,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1911,6 +1919,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1971,6 +1983,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2328,6 +2344,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2388,6 +2408,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2729,6 +2753,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2801,6 +2829,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -3150,6 +3182,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3210,6 +3246,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index 0bfadf9248..59924fd02e 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -566,6 +566,8 @@ SharePointSources, SlackSource, TFRecordDestination, + VertexMultimodalDatasetDestination, + VertexMultimodalDatasetSource, ) from .job_service import ( CancelBatchPredictionJobRequest, @@ -991,6 +993,26 @@ PscInterfaceConfig, PSCAutomationState, ) +from .session import ( + EventActions, + EventMetadata, + Session, + SessionEvent, + Transcription, +) +from .session_service import ( + AppendEventRequest, + AppendEventResponse, + CreateSessionOperationMetadata, + CreateSessionRequest, + DeleteSessionRequest, + GetSessionRequest, + ListEventsRequest, + ListEventsResponse, + ListSessionsRequest, + ListSessionsResponse, + UpdateSessionRequest, +) from .specialist_pool import ( SpecialistPool, ) @@ -1665,6 +1687,8 @@ "SharePointSources", "SlackSource", "TFRecordDestination", + "VertexMultimodalDatasetDestination", + "VertexMultimodalDatasetSource", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", "CancelDataLabelingJobRequest", @@ -1997,6 +2021,22 @@ "PSCAutomationConfig", "PscInterfaceConfig", "PSCAutomationState", + "EventActions", + "EventMetadata", + "Session", + "SessionEvent", + "Transcription", + "AppendEventRequest", + "AppendEventResponse", + "CreateSessionOperationMetadata", + "CreateSessionRequest", + "DeleteSessionRequest", + "GetSessionRequest", + "ListEventsRequest", + "ListEventsResponse", + "ListSessionsRequest", + "ListSessionsResponse", + 
"UpdateSessionRequest", "SpecialistPool", "CreateSpecialistPoolOperationMetadata", "CreateSpecialistPoolRequest", diff --git a/google/cloud/aiplatform_v1/types/annotation.py b/google/cloud/aiplatform_v1/types/annotation.py index 61ed851b98..8bc946506d 100644 --- a/google/cloud/aiplatform_v1/types/annotation.py +++ b/google/cloud/aiplatform_v1/types/annotation.py @@ -20,8 +20,8 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import user_action_reference -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/annotation_spec.py b/google/cloud/aiplatform_v1/types/annotation_spec.py index 2bcf72cb39..a9cc4c6434 100644 --- a/google/cloud/aiplatform_v1/types/annotation_spec.py +++ b/google/cloud/aiplatform_v1/types/annotation_spec.py @@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/artifact.py b/google/cloud/aiplatform_v1/types/artifact.py index f8de1d6c6a..8ef8c96c41 100644 --- a/google/cloud/aiplatform_v1/types/artifact.py +++ b/google/cloud/aiplatform_v1/types/artifact.py @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1/types/batch_prediction_job.py index e1c263e295..4914cd3cd8 100644 --- 
a/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -31,9 +31,9 @@ from google.cloud.aiplatform_v1.types import ( unmanaged_container_model as gca_unmanaged_container_model, ) -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( @@ -273,6 +273,11 @@ class InputConfig(proto.Message): additional columns that are not described by the schema, and they will be ignored. + This field is a member of `oneof`_ ``source``. + vertex_multimodal_dataset_source (google.cloud.aiplatform_v1.types.VertexMultimodalDatasetSource): + A Vertex Managed Dataset. Currently, only + datasets of type Multimodal are supported. + This field is a member of `oneof`_ ``source``. instances_format (str): Required. The format in which instances are given, must be @@ -293,6 +298,14 @@ class InputConfig(proto.Message): oneof="source", message=io.BigQuerySource, ) + vertex_multimodal_dataset_source: io.VertexMultimodalDatasetSource = ( + proto.Field( + proto.MESSAGE, + number=4, + oneof="source", + message=io.VertexMultimodalDatasetSource, + ) + ) instances_format: str = proto.Field( proto.STRING, number=1, @@ -489,6 +502,11 @@ class OutputConfig(proto.Message): [google.rpc.Status][google.rpc.Status] represented as a STRUCT, and containing only ``code`` and ``message``. + This field is a member of `oneof`_ ``destination``. + vertex_multimodal_dataset_destination (google.cloud.aiplatform_v1.types.VertexMultimodalDatasetDestination): + The details for a Vertex Multimodal Dataset + that will be created for the output. + This field is a member of `oneof`_ ``destination``. predictions_format (str): Required. 
The format in which Vertex AI gives the @@ -509,6 +527,14 @@ class OutputConfig(proto.Message): oneof="destination", message=io.BigQueryDestination, ) + vertex_multimodal_dataset_destination: io.VertexMultimodalDatasetDestination = ( + proto.Field( + proto.MESSAGE, + number=6, + oneof="destination", + message=io.VertexMultimodalDatasetDestination, + ) + ) predictions_format: str = proto.Field( proto.STRING, number=1, @@ -537,6 +563,13 @@ class OutputInfo(proto.Message): ``bq://projectId.bqDatasetId`` format, into which the prediction output is written. + This field is a member of `oneof`_ ``output_location``. + vertex_multimodal_dataset_name (str): + Output only. The resource name of the Vertex Managed Dataset + created, into which the prediction output is written. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + This field is a member of `oneof`_ ``output_location``. bigquery_output_table (str): Output only. The name of the BigQuery table created, in @@ -555,6 +588,11 @@ class OutputInfo(proto.Message): number=2, oneof="output_location", ) + vertex_multimodal_dataset_name: str = proto.Field( + proto.STRING, + number=5, + oneof="output_location", + ) bigquery_output_table: str = proto.Field( proto.STRING, number=4, diff --git a/google/cloud/aiplatform_v1/types/cached_content.py b/google/cloud/aiplatform_v1/types/cached_content.py index f3fb112609..172b1f8807 100644 --- a/google/cloud/aiplatform_v1/types/cached_content.py +++ b/google/cloud/aiplatform_v1/types/cached_content.py @@ -22,8 +22,8 @@ from google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import tool -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore 
__protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/content.py b/google/cloud/aiplatform_v1/types/content.py index 8cb833aa6f..101c10147d 100644 --- a/google/cloud/aiplatform_v1/types/content.py +++ b/google/cloud/aiplatform_v1/types/content.py @@ -22,9 +22,9 @@ from google.cloud.aiplatform_v1.types import openapi from google.cloud.aiplatform_v1.types import tool from google.cloud.aiplatform_v1.types import vertex_rag_data -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.type import date_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.type.date_pb2 as date_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/context.py b/google/cloud/aiplatform_v1/types/context.py index 16cb31a43c..2565866942 100644 --- a/google/cloud/aiplatform_v1/types/context.py +++ b/google/cloud/aiplatform_v1/types/context.py @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py index 828af16172..ac4840216b 100644 --- a/google/cloud/aiplatform_v1/types/custom_job.py +++ b/google/cloud/aiplatform_v1/types/custom_job.py @@ -25,9 +25,9 @@ from google.cloud.aiplatform_v1.types import job_state from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import service_networking -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: 
ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/data_item.py b/google/cloud/aiplatform_v1/types/data_item.py index 510e3deb4f..41dff36c0e 100644 --- a/google/cloud/aiplatform_v1/types/data_item.py +++ b/google/cloud/aiplatform_v1/types/data_item.py @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/data_labeling_job.py b/google/cloud/aiplatform_v1/types/data_labeling_job.py index 2ea0ec2c65..c65d5e03dd 100644 --- a/google/cloud/aiplatform_v1/types/data_labeling_job.py +++ b/google/cloud/aiplatform_v1/types/data_labeling_job.py @@ -21,10 +21,10 @@ from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import job_state -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore +import google.type.money_pb2 as money_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/dataset.py b/google/cloud/aiplatform_v1/types/dataset.py index c810b9997b..6545e56438 100644 --- a/google/cloud/aiplatform_v1/types/dataset.py +++ b/google/cloud/aiplatform_v1/types/dataset.py @@ -22,8 +22,8 @@ from 
google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import saved_query -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( @@ -43,8 +43,9 @@ class Dataset(proto.Message): Attributes: name (str): - Output only. Identifier. The resource name of - the Dataset. + Output only. Identifier. The resource name of the Dataset. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` display_name (str): Required. The user-defined name of the Dataset. The name can be up to 128 characters diff --git a/google/cloud/aiplatform_v1/types/dataset_service.py b/google/cloud/aiplatform_v1/types/dataset_service.py index aa594f5304..6c490b5049 100644 --- a/google/cloud/aiplatform_v1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1/types/dataset_service.py @@ -26,7 +26,7 @@ from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import operation from google.cloud.aiplatform_v1.types import saved_query as gca_saved_query -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( @@ -112,7 +112,6 @@ class CreateDatasetOperationMetadata(proto.Message): class GetDatasetRequest(proto.Message): r"""Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 Attributes: name (str): @@ -489,7 +488,6 @@ class DeleteDatasetVersionRequest(proto.Message): class GetDatasetVersionRequest(proto.Message): r"""Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. 
- Next ID: 4 Attributes: name (str): diff --git a/google/cloud/aiplatform_v1/types/dataset_version.py b/google/cloud/aiplatform_v1/types/dataset_version.py index 5a03fe5aa8..771bc1714d 100644 --- a/google/cloud/aiplatform_v1/types/dataset_version.py +++ b/google/cloud/aiplatform_v1/types/dataset_version.py @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( @@ -36,8 +36,9 @@ class DatasetVersion(proto.Message): Attributes: name (str): - Output only. Identifier. The resource name of - the DatasetVersion. + Output only. Identifier. The resource name of the + DatasetVersion. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}`` create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this DatasetVersion was created. 
diff --git a/google/cloud/aiplatform_v1/types/deployment_resource_pool.py b/google/cloud/aiplatform_v1/types/deployment_resource_pool.py index 991c9a6a6d..be316e5c06 100644 --- a/google/cloud/aiplatform_v1/types/deployment_resource_pool.py +++ b/google/cloud/aiplatform_v1/types/deployment_resource_pool.py @@ -21,7 +21,7 @@ from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import machine_resources -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/deployment_resource_pool_service.py b/google/cloud/aiplatform_v1/types/deployment_resource_pool_service.py index 1325565ba4..689b3265ae 100644 --- a/google/cloud/aiplatform_v1/types/deployment_resource_pool_service.py +++ b/google/cloud/aiplatform_v1/types/deployment_resource_pool_service.py @@ -25,7 +25,7 @@ ) from google.cloud.aiplatform_v1.types import endpoint from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/endpoint.py b/google/cloud/aiplatform_v1/types/endpoint.py index c149a18242..51593479c9 100644 --- a/google/cloud/aiplatform_v1/types/endpoint.py +++ b/google/cloud/aiplatform_v1/types/endpoint.py @@ -24,8 +24,8 @@ from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import service_networking -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git 
a/google/cloud/aiplatform_v1/types/endpoint_service.py b/google/cloud/aiplatform_v1/types/endpoint_service.py index b4b9bd99c0..06c6306e92 100644 --- a/google/cloud/aiplatform_v1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1/types/endpoint_service.py @@ -22,7 +22,7 @@ from google.cloud.aiplatform_v1.types import deployment_stage as gca_deployment_stage from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/entity_type.py b/google/cloud/aiplatform_v1/types/entity_type.py index b289d9fe08..75fb6a684a 100644 --- a/google/cloud/aiplatform_v1/types/entity_type.py +++ b/google/cloud/aiplatform_v1/types/entity_type.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import featurestore_monitoring -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/evaluated_annotation.py b/google/cloud/aiplatform_v1/types/evaluated_annotation.py index 2bbced800a..b621c4b806 100644 --- a/google/cloud/aiplatform_v1/types/evaluated_annotation.py +++ b/google/cloud/aiplatform_v1/types/evaluated_annotation.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import explanation as gca_explanation -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/event.py b/google/cloud/aiplatform_v1/types/event.py index 2f4081629b..e3b4c350d2 100644 --- a/google/cloud/aiplatform_v1/types/event.py +++ b/google/cloud/aiplatform_v1/types/event.py @@ -19,7 +19,7 @@ import 
proto # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/execution.py b/google/cloud/aiplatform_v1/types/execution.py index 390e185410..2b70eeba02 100644 --- a/google/cloud/aiplatform_v1/types/execution.py +++ b/google/cloud/aiplatform_v1/types/execution.py @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/explanation.py b/google/cloud/aiplatform_v1/types/explanation.py index 024e9dc839..3d0e509734 100644 --- a/google/cloud/aiplatform_v1/types/explanation.py +++ b/google/cloud/aiplatform_v1/types/explanation.py @@ -21,7 +21,7 @@ from google.cloud.aiplatform_v1.types import explanation_metadata from google.cloud.aiplatform_v1.types import io -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/explanation_metadata.py b/google/cloud/aiplatform_v1/types/explanation_metadata.py index b4be565e37..a903a48841 100644 --- a/google/cloud/aiplatform_v1/types/explanation_metadata.py +++ b/google/cloud/aiplatform_v1/types/explanation_metadata.py @@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/feature.py b/google/cloud/aiplatform_v1/types/feature.py index a124df4924..0bd1c4dece 100644 --- a/google/cloud/aiplatform_v1/types/feature.py +++ 
b/google/cloud/aiplatform_v1/types/feature.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import feature_monitoring_stats -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/feature_group.py b/google/cloud/aiplatform_v1/types/feature_group.py index 63eee4ce43..e85b0f8afd 100644 --- a/google/cloud/aiplatform_v1/types/feature_group.py +++ b/google/cloud/aiplatform_v1/types/feature_group.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import io -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/feature_monitoring_stats.py b/google/cloud/aiplatform_v1/types/feature_monitoring_stats.py index 804bbd6cfe..d7ef8832c5 100644 --- a/google/cloud/aiplatform_v1/types/feature_monitoring_stats.py +++ b/google/cloud/aiplatform_v1/types/feature_monitoring_stats.py @@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/feature_online_store.py b/google/cloud/aiplatform_v1/types/feature_online_store.py index e021bcb506..4a7face2a6 100644 --- a/google/cloud/aiplatform_v1/types/feature_online_store.py +++ b/google/cloud/aiplatform_v1/types/feature_online_store.py @@ -21,7 +21,7 @@ from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import service_networking -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git 
a/google/cloud/aiplatform_v1/types/feature_online_store_admin_service.py b/google/cloud/aiplatform_v1/types/feature_online_store_admin_service.py index 4199faa2a5..bb8692d36b 100644 --- a/google/cloud/aiplatform_v1/types/feature_online_store_admin_service.py +++ b/google/cloud/aiplatform_v1/types/feature_online_store_admin_service.py @@ -25,7 +25,7 @@ from google.cloud.aiplatform_v1.types import feature_view as gca_feature_view from google.cloud.aiplatform_v1.types import feature_view_sync as gca_feature_view_sync from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/feature_online_store_service.py b/google/cloud/aiplatform_v1/types/feature_online_store_service.py index 2690fafe35..65b7491e4e 100644 --- a/google/cloud/aiplatform_v1/types/feature_online_store_service.py +++ b/google/cloud/aiplatform_v1/types/feature_online_store_service.py @@ -20,9 +20,9 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import featurestore_online_service -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/feature_registry_service.py b/google/cloud/aiplatform_v1/types/feature_registry_service.py index 0665b4d086..ad133bd881 100644 --- a/google/cloud/aiplatform_v1/types/feature_registry_service.py +++ b/google/cloud/aiplatform_v1/types/feature_registry_service.py @@ -21,7 +21,7 @@ from google.cloud.aiplatform_v1.types import feature_group as gca_feature_group from 
google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/feature_view.py b/google/cloud/aiplatform_v1/types/feature_view.py index 41822f6ec2..20a6c2cf0d 100644 --- a/google/cloud/aiplatform_v1/types/feature_view.py +++ b/google/cloud/aiplatform_v1/types/feature_view.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import machine_resources -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/feature_view_sync.py b/google/cloud/aiplatform_v1/types/feature_view_sync.py index 0177035cf2..b299a87425 100644 --- a/google/cloud/aiplatform_v1/types/feature_view_sync.py +++ b/google/cloud/aiplatform_v1/types/feature_view_sync.py @@ -19,9 +19,9 @@ import proto # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import interval_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore +import google.type.interval_pb2 as interval_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/featurestore.py b/google/cloud/aiplatform_v1/types/featurestore.py index 11e1c37276..e7313c4a95 100644 --- a/google/cloud/aiplatform_v1/types/featurestore.py +++ b/google/cloud/aiplatform_v1/types/featurestore.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git 
a/google/cloud/aiplatform_v1/types/featurestore_online_service.py b/google/cloud/aiplatform_v1/types/featurestore_online_service.py index 705be0c60d..94e6eb9484 100644 --- a/google/cloud/aiplatform_v1/types/featurestore_online_service.py +++ b/google/cloud/aiplatform_v1/types/featurestore_online_service.py @@ -21,7 +21,7 @@ from google.cloud.aiplatform_v1.types import feature_selector as gca_feature_selector from google.cloud.aiplatform_v1.types import types -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/featurestore_service.py b/google/cloud/aiplatform_v1/types/featurestore_service.py index cfe75e7a5b..6bcf06d1e1 100644 --- a/google/cloud/aiplatform_v1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1/types/featurestore_service.py @@ -25,9 +25,9 @@ from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.type import interval_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.type.interval_pb2 as interval_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/gen_ai_cache_service.py b/google/cloud/aiplatform_v1/types/gen_ai_cache_service.py index 3c702e7173..68e9f42de6 100644 --- a/google/cloud/aiplatform_v1/types/gen_ai_cache_service.py +++ b/google/cloud/aiplatform_v1/types/gen_ai_cache_service.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content -from google.protobuf import field_mask_pb2 # type: ignore 
+import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py b/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py index e5eaee7b18..a3f75710c7 100644 --- a/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py +++ b/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py @@ -23,8 +23,8 @@ from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import job_state from google.cloud.aiplatform_v1.types import study -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/index.py b/google/cloud/aiplatform_v1/types/index.py index 3c306c4428..162e64b501 100644 --- a/google/cloud/aiplatform_v1/types/index.py +++ b/google/cloud/aiplatform_v1/types/index.py @@ -21,8 +21,8 @@ from google.cloud.aiplatform_v1.types import deployed_index_ref from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/index_endpoint.py b/google/cloud/aiplatform_v1/types/index_endpoint.py index bfbea76141..27b892bea5 100644 --- a/google/cloud/aiplatform_v1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1/types/index_endpoint.py @@ -22,7 +22,7 @@ from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import machine_resources from 
google.cloud.aiplatform_v1.types import service_networking -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/index_endpoint_service.py b/google/cloud/aiplatform_v1/types/index_endpoint_service.py index b5e81434ba..305868f3b1 100644 --- a/google/cloud/aiplatform_v1/types/index_endpoint_service.py +++ b/google/cloud/aiplatform_v1/types/index_endpoint_service.py @@ -21,7 +21,7 @@ from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/index_service.py b/google/cloud/aiplatform_v1/types/index_service.py index e252eab49b..363850bd87 100644 --- a/google/cloud/aiplatform_v1/types/index_service.py +++ b/google/cloud/aiplatform_v1/types/index_service.py @@ -21,7 +21,7 @@ from google.cloud.aiplatform_v1.types import index as gca_index from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/io.py b/google/cloud/aiplatform_v1/types/io.py index 07d89f0bea..ac11ade652 100644 --- a/google/cloud/aiplatform_v1/types/io.py +++ b/google/cloud/aiplatform_v1/types/io.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import api_auth -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( @@ -32,6 +32,8 @@ "GcsDestination", "BigQuerySource", "BigQueryDestination", + "VertexMultimodalDatasetSource", + 
"VertexMultimodalDatasetDestination", "CsvDestination", "TFRecordDestination", "ContainerRegistryDestination", @@ -152,6 +154,46 @@ class BigQueryDestination(proto.Message): ) +class VertexMultimodalDatasetSource(proto.Message): + r"""The Vertex Multimodal Dataset for the input content. + + Attributes: + dataset_name (str): + Required. The resource name of the Vertex Dataset. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + """ + + dataset_name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class VertexMultimodalDatasetDestination(proto.Message): + r"""The details for a Vertex Multimodal Dataset output. + + Attributes: + bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): + Optional. The destination of the underlying + BigQuery table that will be created for the + output Multimodal Dataset. If not specified, the + BigQuery table will be created in a default + BigQuery dataset. + display_name (str): + Optional. Display name of the output dataset. + """ + + bigquery_destination: "BigQueryDestination" = proto.Field( + proto.MESSAGE, + number=1, + message="BigQueryDestination", + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + + class CsvDestination(proto.Message): r"""The storage details for CSV output content. 
diff --git a/google/cloud/aiplatform_v1/types/job_service.py b/google/cloud/aiplatform_v1/types/job_service.py index 1cb3b7f5a2..c4fa5aca2a 100644 --- a/google/cloud/aiplatform_v1/types/job_service.py +++ b/google/cloud/aiplatform_v1/types/job_service.py @@ -32,8 +32,8 @@ ) from google.cloud.aiplatform_v1.types import nas_job as gca_nas_job from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/llm_utility_service.py b/google/cloud/aiplatform_v1/types/llm_utility_service.py index 5109a57d22..a959b86270 100644 --- a/google/cloud/aiplatform_v1/types/llm_utility_service.py +++ b/google/cloud/aiplatform_v1/types/llm_utility_service.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import content -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/metadata_schema.py b/google/cloud/aiplatform_v1/types/metadata_schema.py index a5043d3113..92f58013b2 100644 --- a/google/cloud/aiplatform_v1/types/metadata_schema.py +++ b/google/cloud/aiplatform_v1/types/metadata_schema.py @@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/metadata_service.py b/google/cloud/aiplatform_v1/types/metadata_service.py index 4726b19685..9a46a6307f 100644 --- a/google/cloud/aiplatform_v1/types/metadata_service.py +++ b/google/cloud/aiplatform_v1/types/metadata_service.py @@ -26,7 +26,7 @@ from 
google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/metadata_store.py b/google/cloud/aiplatform_v1/types/metadata_store.py index eed1cc747c..270712a984 100644 --- a/google/cloud/aiplatform_v1/types/metadata_store.py +++ b/google/cloud/aiplatform_v1/types/metadata_store.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/migratable_resource.py b/google/cloud/aiplatform_v1/types/migratable_resource.py index 72d369b3c4..cfbdc67237 100644 --- a/google/cloud/aiplatform_v1/types/migratable_resource.py +++ b/google/cloud/aiplatform_v1/types/migratable_resource.py @@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/migration_service.py b/google/cloud/aiplatform_v1/types/migration_service.py index 9d19366134..0a5a9182fc 100644 --- a/google/cloud/aiplatform_v1/types/migration_service.py +++ b/google/cloud/aiplatform_v1/types/migration_service.py @@ -23,7 +23,7 @@ migratable_resource as gca_migratable_resource, ) from google.cloud.aiplatform_v1.types import operation -from google.rpc import status_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git 
a/google/cloud/aiplatform_v1/types/model.py b/google/cloud/aiplatform_v1/types/model.py index 60315bc23e..80453ee31d 100644 --- a/google/cloud/aiplatform_v1/types/model.py +++ b/google/cloud/aiplatform_v1/types/model.py @@ -23,9 +23,9 @@ from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import env_var from google.cloud.aiplatform_v1.types import explanation -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py index f725cbdb3f..3b84bfbcd0 100644 --- a/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py @@ -24,10 +24,10 @@ from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import job_state from google.cloud.aiplatform_v1.types import model_monitoring -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/model_evaluation.py b/google/cloud/aiplatform_v1/types/model_evaluation.py index 
80485ab2c3..a7c7d49738 100644 --- a/google/cloud/aiplatform_v1/types/model_evaluation.py +++ b/google/cloud/aiplatform_v1/types/model_evaluation.py @@ -20,8 +20,8 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import explanation -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/model_evaluation_slice.py b/google/cloud/aiplatform_v1/types/model_evaluation_slice.py index 6ce7041641..98cbcda3ba 100644 --- a/google/cloud/aiplatform_v1/types/model_evaluation_slice.py +++ b/google/cloud/aiplatform_v1/types/model_evaluation_slice.py @@ -20,9 +20,9 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import explanation -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/model_service.py b/google/cloud/aiplatform_v1/types/model_service.py index 32ba35a7dc..75f76d80f7 100644 --- a/google/cloud/aiplatform_v1/types/model_service.py +++ b/google/cloud/aiplatform_v1/types/model_service.py @@ -27,7 +27,7 @@ from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation from google.cloud.aiplatform_v1.types import model_evaluation_slice from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git 
a/google/cloud/aiplatform_v1/types/nas_job.py b/google/cloud/aiplatform_v1/types/nas_job.py index 13fa3b82cc..a52163a0ff 100644 --- a/google/cloud/aiplatform_v1/types/nas_job.py +++ b/google/cloud/aiplatform_v1/types/nas_job.py @@ -23,8 +23,8 @@ from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import job_state from google.cloud.aiplatform_v1.types import study -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/notebook_execution_job.py b/google/cloud/aiplatform_v1/types/notebook_execution_job.py index fd39bd3c92..cd6ea579f9 100644 --- a/google/cloud/aiplatform_v1/types/notebook_execution_job.py +++ b/google/cloud/aiplatform_v1/types/notebook_execution_job.py @@ -23,9 +23,9 @@ from google.cloud.aiplatform_v1.types import job_state as gca_job_state from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import network_spec as gca_network_spec -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/notebook_idle_shutdown_config.py b/google/cloud/aiplatform_v1/types/notebook_idle_shutdown_config.py index f1fbb5595e..03e22e6bc7 100644 --- a/google/cloud/aiplatform_v1/types/notebook_idle_shutdown_config.py +++ b/google/cloud/aiplatform_v1/types/notebook_idle_shutdown_config.py @@ -19,7 +19,7 @@ import proto # type: ignore 
-from google.protobuf import duration_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/notebook_runtime.py b/google/cloud/aiplatform_v1/types/notebook_runtime.py index 8f030fb1bc..d26993a462 100644 --- a/google/cloud/aiplatform_v1/types/notebook_runtime.py +++ b/google/cloud/aiplatform_v1/types/notebook_runtime.py @@ -28,7 +28,7 @@ notebook_runtime_template_ref as gca_notebook_runtime_template_ref, ) from google.cloud.aiplatform_v1.types import notebook_software_config -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/notebook_service.py b/google/cloud/aiplatform_v1/types/notebook_service.py index 6cf9ed2394..240967ca9d 100644 --- a/google/cloud/aiplatform_v1/types/notebook_service.py +++ b/google/cloud/aiplatform_v1/types/notebook_service.py @@ -24,7 +24,7 @@ ) from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/openapi.py b/google/cloud/aiplatform_v1/types/openapi.py index 4ea820593b..c184c6fb02 100644 --- a/google/cloud/aiplatform_v1/types/openapi.py +++ b/google/cloud/aiplatform_v1/types/openapi.py @@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/operation.py b/google/cloud/aiplatform_v1/types/operation.py index f1adddb4ed..f0c0cd9fb9 100644 --- a/google/cloud/aiplatform_v1/types/operation.py +++ 
b/google/cloud/aiplatform_v1/types/operation.py @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/persistent_resource.py b/google/cloud/aiplatform_v1/types/persistent_resource.py index 1b5e2f1cd8..350896324d 100644 --- a/google/cloud/aiplatform_v1/types/persistent_resource.py +++ b/google/cloud/aiplatform_v1/types/persistent_resource.py @@ -22,8 +22,8 @@ from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import service_networking -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/persistent_resource_service.py b/google/cloud/aiplatform_v1/types/persistent_resource_service.py index 8bf56c058f..56a48a2b77 100644 --- a/google/cloud/aiplatform_v1/types/persistent_resource_service.py +++ b/google/cloud/aiplatform_v1/types/persistent_resource_service.py @@ -23,7 +23,7 @@ from google.cloud.aiplatform_v1.types import ( persistent_resource as gca_persistent_resource, ) -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/pipeline_job.py b/google/cloud/aiplatform_v1/types/pipeline_job.py index 4dcca2448d..57101cb2af 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1/types/pipeline_job.py @@ 
-27,9 +27,9 @@ from google.cloud.aiplatform_v1.types import pipeline_state from google.cloud.aiplatform_v1.types import service_networking from google.cloud.aiplatform_v1.types import value as gca_value -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/pipeline_service.py b/google/cloud/aiplatform_v1/types/pipeline_service.py index ce8ab51284..697af698bd 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1/types/pipeline_service.py @@ -22,7 +22,7 @@ from google.cloud.aiplatform_v1.types import operation from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/prediction_service.py b/google/cloud/aiplatform_v1/types/prediction_service.py index 54d45d99b7..d69099bbd8 100644 --- a/google/cloud/aiplatform_v1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1/types/prediction_service.py @@ -19,14 +19,14 @@ import proto # type: ignore -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import content as gca_content from google.cloud.aiplatform_v1.types import explanation from google.cloud.aiplatform_v1.types import tool from google.cloud.aiplatform_v1.types import types from google.cloud.aiplatform_v1.types import usage_metadata as gca_usage_metadata -from google.protobuf import struct_pb2 # 
type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( @@ -1028,8 +1028,40 @@ class UsageMetadata(proto.Message): candidates_tokens_details (MutableSequence[google.cloud.aiplatform_v1.types.ModalityTokenCount]): Output only. List of modalities that were returned in the response. + tool_use_prompt_tokens_details (MutableSequence[google.cloud.aiplatform_v1.types.ModalityTokenCount]): + Output only. A detailed breakdown by modality + of the token counts from the results of tool + executions, which are provided back to the model + as input. + traffic_type (google.cloud.aiplatform_v1.types.GenerateContentResponse.UsageMetadata.TrafficType): + Output only. The traffic type for this + request. """ + class TrafficType(proto.Enum): + r"""The type of traffic that this request was processed with, + indicating which quota is consumed. + + Values: + TRAFFIC_TYPE_UNSPECIFIED (0): + Unspecified request traffic type. + ON_DEMAND (1): + The request was processed using Pay-As-You-Go + quota. + ON_DEMAND_PRIORITY (3): + Type for Priority Pay-As-You-Go traffic. + ON_DEMAND_FLEX (4): + Type for Flex traffic. + PROVISIONED_THROUGHPUT (2): + Type for Provisioned Throughput traffic. 
+ """ + + TRAFFIC_TYPE_UNSPECIFIED = 0 + ON_DEMAND = 1 + ON_DEMAND_PRIORITY = 3 + ON_DEMAND_FLEX = 4 + PROVISIONED_THROUGHPUT = 2 + prompt_token_count: int = proto.Field( proto.INT32, number=1, @@ -1071,6 +1103,18 @@ class UsageMetadata(proto.Message): message=gca_content.ModalityTokenCount, ) ) + tool_use_prompt_tokens_details: MutableSequence[ + gca_content.ModalityTokenCount + ] = proto.RepeatedField( + proto.MESSAGE, + number=12, + message=gca_content.ModalityTokenCount, + ) + traffic_type: "GenerateContentResponse.UsageMetadata.TrafficType" = proto.Field( + proto.ENUM, + number=8, + enum="GenerateContentResponse.UsageMetadata.TrafficType", + ) candidates: MutableSequence[gca_content.Candidate] = proto.RepeatedField( proto.MESSAGE, diff --git a/google/cloud/aiplatform_v1/types/reasoning_engine.py b/google/cloud/aiplatform_v1/types/reasoning_engine.py index 31e7daaf12..67efa87e25 100644 --- a/google/cloud/aiplatform_v1/types/reasoning_engine.py +++ b/google/cloud/aiplatform_v1/types/reasoning_engine.py @@ -22,8 +22,8 @@ from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import env_var from google.cloud.aiplatform_v1.types import service_networking -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( @@ -228,6 +228,11 @@ class SourceCodeSpec(proto.Message): python_spec (google.cloud.aiplatform_v1.types.ReasoningEngineSpec.SourceCodeSpec.PythonSpec): Configuration for a Python application. + This field is a member of `oneof`_ ``language_spec``. + image_spec (google.cloud.aiplatform_v1.types.ReasoningEngineSpec.SourceCodeSpec.ImageSpec): + Optional. Configuration for building an image + with custom config file. + This field is a member of `oneof`_ ``language_spec``. 
""" @@ -246,6 +251,23 @@ class InlineSource(proto.Message): number=1, ) + class ImageSpec(proto.Message): + r"""The image spec for building an image (within a single build + step), based on the config file (i.e. Dockerfile) in the source + directory. + + Attributes: + build_args (MutableMapping[str, str]): + Optional. Build arguments to be used. They + will be passed through --build-arg flags. + """ + + build_args: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=1, + ) + class DeveloperConnectConfig(proto.Message): r"""Specifies the configuration for fetching source code from a Git repository that is managed by Developer Connect. This @@ -361,6 +383,12 @@ class PythonSpec(proto.Message): oneof="language_spec", message="ReasoningEngineSpec.SourceCodeSpec.PythonSpec", ) + image_spec: "ReasoningEngineSpec.SourceCodeSpec.ImageSpec" = proto.Field( + proto.MESSAGE, + number=5, + oneof="language_spec", + message="ReasoningEngineSpec.SourceCodeSpec.ImageSpec", + ) source_code_spec: SourceCodeSpec = proto.Field( proto.MESSAGE, diff --git a/google/cloud/aiplatform_v1/types/reasoning_engine_execution_service.py b/google/cloud/aiplatform_v1/types/reasoning_engine_execution_service.py index 471fcad7eb..fa0427ba45 100644 --- a/google/cloud/aiplatform_v1/types/reasoning_engine_execution_service.py +++ b/google/cloud/aiplatform_v1/types/reasoning_engine_execution_service.py @@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/reasoning_engine_service.py b/google/cloud/aiplatform_v1/types/reasoning_engine_service.py index 6b6685ea09..f4741ad41d 100644 --- a/google/cloud/aiplatform_v1/types/reasoning_engine_service.py +++ b/google/cloud/aiplatform_v1/types/reasoning_engine_service.py @@ -21,7 +21,7 @@ from google.cloud.aiplatform_v1.types import operation from 
google.cloud.aiplatform_v1.types import reasoning_engine as gca_reasoning_engine -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/saved_query.py b/google/cloud/aiplatform_v1/types/saved_query.py index fa9727a5da..16f51156c9 100644 --- a/google/cloud/aiplatform_v1/types/saved_query.py +++ b/google/cloud/aiplatform_v1/types/saved_query.py @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/schedule.py b/google/cloud/aiplatform_v1/types/schedule.py index cdb61246d4..6009b3614e 100644 --- a/google/cloud/aiplatform_v1/types/schedule.py +++ b/google/cloud/aiplatform_v1/types/schedule.py @@ -21,7 +21,7 @@ from google.cloud.aiplatform_v1.types import notebook_service from google.cloud.aiplatform_v1.types import pipeline_service -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( @@ -119,6 +119,14 @@ class Schedule(proto.Message): the limit for starting the scheduled requests and not the execution of the operations/jobs created by the requests (if applicable). + max_concurrent_active_run_count (int): + Optional. Specifies the maximum number of + active runs that can be executed concurrently + for this Schedule. This limits the number of + runs that can be in a non-terminal state at the + same time. + Currently, this field is only supported for + requests of type CreatePipelineJobRequest. allow_queueing (bool): Optional. Whether new scheduled runs can be queued when max_concurrent_runs limit is reached. 
If set to true, new @@ -265,6 +273,10 @@ class RunResponse(proto.Message): proto.INT64, number=11, ) + max_concurrent_active_run_count: int = proto.Field( + proto.INT64, + number=21, + ) allow_queueing: bool = proto.Field( proto.BOOL, number=12, diff --git a/google/cloud/aiplatform_v1/types/schedule_service.py b/google/cloud/aiplatform_v1/types/schedule_service.py index cfa8414c73..87888576eb 100644 --- a/google/cloud/aiplatform_v1/types/schedule_service.py +++ b/google/cloud/aiplatform_v1/types/schedule_service.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import schedule as gca_schedule -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/session.py b/google/cloud/aiplatform_v1/types/session.py new file mode 100644 index 0000000000..14d57cdc16 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/session.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import content as gca_content +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "Session", + "SessionEvent", + "EventMetadata", + "EventActions", + "Transcription", + }, +) + + +class Session(proto.Message): + r"""A session contains a set of actions between users and Vertex + agents. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + expire_time (google.protobuf.timestamp_pb2.Timestamp): + Optional. Timestamp of when this session is considered + expired. This is *always* provided on output, regardless of + what was sent on input. The minimum value is 24 hours from + the time of creation. + + This field is a member of `oneof`_ ``expiration``. + ttl (google.protobuf.duration_pb2.Duration): + Optional. Input only. The TTL for this + session. The minimum value is 24 hours. + + This field is a member of `oneof`_ ``expiration``. + name (str): + Identifier. The resource name of the session. Format: + 'projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}'. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the session was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the session was + updated. + display_name (str): + Optional. The display name of the session. 
+ labels (MutableMapping[str, str]): + The labels with user-defined metadata to + organize your Sessions. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + + See https://goo.gl/xmQnxf for more information + and examples of labels. + session_state (google.protobuf.struct_pb2.Struct): + Optional. Session specific memory which + stores key conversation points. + user_id (str): + Required. Immutable. String id provided by + the user. + """ + + expire_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=13, + oneof="expiration", + message=timestamp_pb2.Timestamp, + ) + ttl: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=14, + oneof="expiration", + message=duration_pb2.Duration, + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + display_name: str = proto.Field( + proto.STRING, + number=5, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + session_state: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=10, + message=struct_pb2.Struct, + ) + user_id: str = proto.Field( + proto.STRING, + number=12, + ) + + +class SessionEvent(proto.Message): + r"""An event represents a message from either the user or agent. + + Attributes: + name (str): + Identifier. The resource name of the event. + Format: ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}/events/{event}``. + author (str): + Required. The name of the agent that sent the + event, or user. + content (google.cloud.aiplatform_v1.types.Content): + Optional. 
Content of the event provided by + the author. + invocation_id (str): + Required. The invocation id of the event, + multiple events can have the same invocation id. + actions (google.cloud.aiplatform_v1.types.EventActions): + Optional. Actions executed by the agent. + timestamp (google.protobuf.timestamp_pb2.Timestamp): + Required. Timestamp when the event was + created on client side. + error_code (str): + Optional. Error code if the response is an + error. Code varies by model. + error_message (str): + Optional. Error message if the response is an + error. + event_metadata (google.cloud.aiplatform_v1.types.EventMetadata): + Optional. Metadata relating to this event. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + author: str = proto.Field( + proto.STRING, + number=3, + ) + content: gca_content.Content = proto.Field( + proto.MESSAGE, + number=4, + message=gca_content.Content, + ) + invocation_id: str = proto.Field( + proto.STRING, + number=5, + ) + actions: "EventActions" = proto.Field( + proto.MESSAGE, + number=6, + message="EventActions", + ) + timestamp: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + error_code: str = proto.Field( + proto.STRING, + number=9, + ) + error_message: str = proto.Field( + proto.STRING, + number=10, + ) + event_metadata: "EventMetadata" = proto.Field( + proto.MESSAGE, + number=11, + message="EventMetadata", + ) + + +class EventMetadata(proto.Message): + r"""Metadata relating to a LLM response event. + + Attributes: + grounding_metadata (google.cloud.aiplatform_v1.types.GroundingMetadata): + Optional. Metadata returned to client when + grounding is enabled. + partial (bool): + Optional. Indicates whether the text content + is part of a unfinished text stream. Only used + for streaming mode and when the content is plain + text. + turn_complete (bool): + Optional. Indicates whether the response from + the model is complete. Only used for streaming + mode. 
+ interrupted (bool): + Optional. Flag indicating that LLM was + interrupted when generating the content. Usually + it's due to user interruption during a bidi + streaming. + long_running_tool_ids (MutableSequence[str]): + Optional. Set of ids of the long running + function calls. Agent client will know from this + field about which function call is long running. + Only valid for function call event. + branch (str): + Optional. The branch of the event. The format is like + agent_1.agent_2.agent_3, where agent_1 is the parent of + agent_2, and agent_2 is the parent of agent_3. Branch is + used when multiple child agents shouldn't see their + siblings' conversation history. + custom_metadata (google.protobuf.struct_pb2.Struct): + The custom metadata of the LlmResponse. + input_transcription (google.cloud.aiplatform_v1.types.Transcription): + Optional. Audio transcription of user input. + output_transcription (google.cloud.aiplatform_v1.types.Transcription): + Optional. Audio transcription of model + output. + """ + + grounding_metadata: gca_content.GroundingMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=gca_content.GroundingMetadata, + ) + partial: bool = proto.Field( + proto.BOOL, + number=2, + ) + turn_complete: bool = proto.Field( + proto.BOOL, + number=3, + ) + interrupted: bool = proto.Field( + proto.BOOL, + number=4, + ) + long_running_tool_ids: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=5, + ) + branch: str = proto.Field( + proto.STRING, + number=6, + ) + custom_metadata: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=7, + message=struct_pb2.Struct, + ) + input_transcription: "Transcription" = proto.Field( + proto.MESSAGE, + number=10, + message="Transcription", + ) + output_transcription: "Transcription" = proto.Field( + proto.MESSAGE, + number=11, + message="Transcription", + ) + + +class EventActions(proto.Message): + r"""Actions are parts of events that are executed by the agent. 
+ + Attributes: + skip_summarization (bool): + Optional. If true, it won't call model to summarize function + response. Only used for function_response event. + state_delta (google.protobuf.struct_pb2.Struct): + Optional. Indicates that the event is + updating the state with the given delta. + artifact_delta (MutableMapping[str, int]): + Optional. Indicates that the event is + updating an artifact. key is the filename, value + is the version. + escalate (bool): + Optional. The agent is escalating to a higher + level agent. + requested_auth_configs (google.protobuf.struct_pb2.Struct): + Optional. Will only be set by a tool response + indicating tool request euc. Struct key is the + function call id since one function call + response (from model) could correspond to + multiple function calls. Struct value is the + required auth config, which can be another + struct. + transfer_agent (str): + Optional. If set, the event transfers to the + specified agent. + """ + + skip_summarization: bool = proto.Field( + proto.BOOL, + number=1, + ) + state_delta: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=2, + message=struct_pb2.Struct, + ) + artifact_delta: MutableMapping[str, int] = proto.MapField( + proto.STRING, + proto.INT32, + number=3, + ) + escalate: bool = proto.Field( + proto.BOOL, + number=6, + ) + requested_auth_configs: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=7, + message=struct_pb2.Struct, + ) + transfer_agent: str = proto.Field( + proto.STRING, + number=8, + ) + + +class Transcription(proto.Message): + r"""Audio transcription in Server Content. + + Attributes: + text (str): + Optional. Transcription text. + finished (bool): + Optional. The bool indicates the end of the + transcription. 
+ """ + + text: str = proto.Field( + proto.STRING, + number=1, + ) + finished: bool = proto.Field( + proto.BOOL, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/session_service.py b/google/cloud/aiplatform_v1/types/session_service.py new file mode 100644 index 0000000000..dd0326d507 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/session_service.py @@ -0,0 +1,353 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import operation +from google.cloud.aiplatform_v1.types import session as gca_session +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "CreateSessionRequest", + "CreateSessionOperationMetadata", + "GetSessionRequest", + "ListSessionsRequest", + "ListSessionsResponse", + "UpdateSessionRequest", + "DeleteSessionRequest", + "ListEventsRequest", + "ListEventsResponse", + "AppendEventRequest", + "AppendEventResponse", + }, +) + + +class CreateSessionRequest(proto.Message): + r"""Request message for + [SessionService.CreateSession][google.cloud.aiplatform.v1.SessionService.CreateSession]. + + Attributes: + parent (str): + Required. 
The resource name of the location to create the + session in. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`` + session (google.cloud.aiplatform_v1.types.Session): + Required. The session to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + session: gca_session.Session = proto.Field( + proto.MESSAGE, + number=2, + message=gca_session.Session, + ) + + +class CreateSessionOperationMetadata(proto.Message): + r"""Metadata associated with the + [SessionService.CreateSession][google.cloud.aiplatform.v1.SessionService.CreateSession] + operation. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The common part of the operation metadata. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetSessionRequest(proto.Message): + r"""Request message for + [SessionService.GetSession][google.cloud.aiplatform.v1.SessionService.GetSession]. + + Attributes: + name (str): + Required. The resource name of the session. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListSessionsRequest(proto.Message): + r"""Request message for + [SessionService.ListSessions][google.cloud.aiplatform.v1.SessionService.ListSessions]. + + Attributes: + parent (str): + Required. The resource name of the location to list sessions + from. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`` + page_size (int): + Optional. The maximum number of sessions to + return. The service may return fewer than this + value. If unspecified, at most 100 sessions will + be returned. + page_token (str): + Optional. 
The + [next_page_token][google.cloud.aiplatform.v1.ListSessionsResponse.next_page_token] + value returned from a previous list + [SessionService.ListSessions][google.cloud.aiplatform.v1.SessionService.ListSessions] + call. + filter (str): + Optional. The standard list filter. Supported fields: \* + ``display_name`` \* ``user_id`` \* ``labels`` + + Example: ``display_name="abc"``, ``user_id="123"``, + ``labels.key="value"``. + order_by (str): + Optional. A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``create_time`` + - ``update_time`` + + Example: ``create_time desc``. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + order_by: str = proto.Field( + proto.STRING, + number=5, + ) + + +class ListSessionsResponse(proto.Message): + r"""Response message for + [SessionService.ListSessions][google.cloud.aiplatform.v1.SessionService.ListSessions]. + + Attributes: + sessions (MutableSequence[google.cloud.aiplatform_v1.types.Session]): + A list of sessions matching the request. + next_page_token (str): + A token, which can be sent as + [ListSessionsRequest.page_token][google.cloud.aiplatform.v1.ListSessionsRequest.page_token] + to retrieve the next page. Absence of this field indicates + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + sessions: MutableSequence[gca_session.Session] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_session.Session, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateSessionRequest(proto.Message): + r"""Request message for + [SessionService.UpdateSession][google.cloud.aiplatform.v1.SessionService.UpdateSession]. 
+ + Attributes: + session (google.cloud.aiplatform_v1.types.Session): + Required. The session to update. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Field mask is used to control which + fields get updated. If the mask is not present, + all fields will be updated. + """ + + session: gca_session.Session = proto.Field( + proto.MESSAGE, + number=1, + message=gca_session.Session, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteSessionRequest(proto.Message): + r"""Request message for + [SessionService.DeleteSession][google.cloud.aiplatform.v1.SessionService.DeleteSession]. + + Attributes: + name (str): + Required. The resource name of the session. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListEventsRequest(proto.Message): + r"""Request message for + [SessionService.ListEvents][google.cloud.aiplatform.v1.SessionService.ListEvents]. + + Attributes: + parent (str): + Required. The resource name of the session to list events + from. Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + page_size (int): + Optional. The maximum number of events to + return. The service may return fewer than this + value. If unspecified, at most 100 events will + be returned. These events are ordered by + timestamp in ascending order. + page_token (str): + Optional. The + [next_page_token][google.cloud.aiplatform.v1.ListEventsResponse.next_page_token] + value returned from a previous list + [SessionService.ListEvents][google.cloud.aiplatform.v1.SessionService.ListEvents] + call. + filter (str): + Optional. The standard list filter. 
Supported fields: \* + ``timestamp`` range (i.e. + ``timestamp>="2025-01-31T11:30:00-04:00"`` where the + timestamp is in RFC 3339 format) + + More detail in `AIP-160 <https://google.aip.dev/160>`__. + order_by (str): + Optional. A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``timestamp`` + + Example: ``timestamp desc``. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + filter: str = proto.Field( + proto.STRING, + number=4, + ) + order_by: str = proto.Field( + proto.STRING, + number=5, + ) + + +class ListEventsResponse(proto.Message): + r"""Response message for + [SessionService.ListEvents][google.cloud.aiplatform.v1.SessionService.ListEvents]. + + Attributes: + session_events (MutableSequence[google.cloud.aiplatform_v1.types.SessionEvent]): + A list of events matching the request. + Ordered by timestamp in ascending order. + next_page_token (str): + A token, which can be sent as + [ListEventsRequest.page_token][google.cloud.aiplatform.v1.ListEventsRequest.page_token] + to retrieve the next page. Absence of this field indicates + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + session_events: MutableSequence[gca_session.SessionEvent] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_session.SessionEvent, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class AppendEventRequest(proto.Message): + r"""Request message for + [SessionService.AppendEvent][google.cloud.aiplatform.v1.SessionService.AppendEvent]. + + Attributes: + name (str): + Required. The resource name of the session to append event + to. 
Format: + ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}`` + event (google.cloud.aiplatform_v1.types.SessionEvent): + Required. The event to append to the session. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + event: gca_session.SessionEvent = proto.Field( + proto.MESSAGE, + number=2, + message=gca_session.SessionEvent, + ) + + +class AppendEventResponse(proto.Message): + r"""Response message for + [SessionService.AppendEvent][google.cloud.aiplatform.v1.SessionService.AppendEvent]. + + """ + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/specialist_pool_service.py b/google/cloud/aiplatform_v1/types/specialist_pool_service.py index b3e0533e83..7facfbf1d8 100644 --- a/google/cloud/aiplatform_v1/types/specialist_pool_service.py +++ b/google/cloud/aiplatform_v1/types/specialist_pool_service.py @@ -21,7 +21,7 @@ from google.cloud.aiplatform_v1.types import operation from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/study.py b/google/cloud/aiplatform_v1/types/study.py index dac41d5f1f..02dca81529 100644 --- a/google/cloud/aiplatform_v1/types/study.py +++ b/google/cloud/aiplatform_v1/types/study.py @@ -19,10 +19,10 @@ import proto # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as 
wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/tensorboard.py b/google/cloud/aiplatform_v1/types/tensorboard.py index 23dad29b79..8507cd9de9 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard.py +++ b/google/cloud/aiplatform_v1/types/tensorboard.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/tensorboard_data.py b/google/cloud/aiplatform_v1/types/tensorboard_data.py index 6d9e6dab90..23dff191b0 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard_data.py +++ b/google/cloud/aiplatform_v1/types/tensorboard_data.py @@ -20,7 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import tensorboard_time_series -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/tensorboard_experiment.py b/google/cloud/aiplatform_v1/types/tensorboard_experiment.py index dea6536f6d..43f634a0d4 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard_experiment.py +++ b/google/cloud/aiplatform_v1/types/tensorboard_experiment.py @@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/tensorboard_run.py b/google/cloud/aiplatform_v1/types/tensorboard_run.py index c12ca46c30..d220ff56e6 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard_run.py +++ b/google/cloud/aiplatform_v1/types/tensorboard_run.py @@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import 
timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/tensorboard_service.py b/google/cloud/aiplatform_v1/types/tensorboard_service.py index bbcb49f0e2..1aba2adfa6 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard_service.py +++ b/google/cloud/aiplatform_v1/types/tensorboard_service.py @@ -29,7 +29,7 @@ from google.cloud.aiplatform_v1.types import ( tensorboard_time_series as gca_tensorboard_time_series, ) -from google.protobuf import field_mask_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/tensorboard_time_series.py b/google/cloud/aiplatform_v1/types/tensorboard_time_series.py index 834269dfea..9ebd98aacc 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard_time_series.py +++ b/google/cloud/aiplatform_v1/types/tensorboard_time_series.py @@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/tool.py b/google/cloud/aiplatform_v1/types/tool.py index 7ee7ca9d91..26561cf22f 100644 --- a/google/cloud/aiplatform_v1/types/tool.py +++ b/google/cloud/aiplatform_v1/types/tool.py @@ -20,8 +20,8 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import openapi -from google.protobuf import struct_pb2 # type: ignore -from google.type import latlng_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.type.latlng_pb2 as latlng_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/training_pipeline.py b/google/cloud/aiplatform_v1/types/training_pipeline.py index 690c31494b..1c21eaca55 100644 --- 
a/google/cloud/aiplatform_v1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1/types/training_pipeline.py @@ -23,9 +23,9 @@ from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import pipeline_state -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/tuning_job.py b/google/cloud/aiplatform_v1/types/tuning_job.py index 80a35c9992..49d71ba0de 100644 --- a/google/cloud/aiplatform_v1/types/tuning_job.py +++ b/google/cloud/aiplatform_v1/types/tuning_job.py @@ -22,8 +22,8 @@ from google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import job_state -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/vertex_rag_data.py b/google/cloud/aiplatform_v1/types/vertex_rag_data.py index 67c9ae3810..d739d44a1b 100644 --- a/google/cloud/aiplatform_v1/types/vertex_rag_data.py +++ b/google/cloud/aiplatform_v1/types/vertex_rag_data.py @@ -22,7 +22,7 @@ from google.cloud.aiplatform_v1.types import api_auth as gca_api_auth from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import io -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as 
timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/vertex_rag_data_service.py b/google/cloud/aiplatform_v1/types/vertex_rag_data_service.py index bb1f3336f0..003164b00c 100644 --- a/google/cloud/aiplatform_v1/types/vertex_rag_data_service.py +++ b/google/cloud/aiplatform_v1/types/vertex_rag_data_service.py @@ -21,7 +21,7 @@ from google.cloud.aiplatform_v1.types import operation from google.cloud.aiplatform_v1.types import vertex_rag_data -from google.rpc import status_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1/types/vizier_service.py b/google/cloud/aiplatform_v1/types/vizier_service.py index 6ad50e4af1..523522e2fd 100644 --- a/google/cloud/aiplatform_v1/types/vizier_service.py +++ b/google/cloud/aiplatform_v1/types/vizier_service.py @@ -21,7 +21,7 @@ from google.cloud.aiplatform_v1.types import operation from google.cloud.aiplatform_v1.types import study as gca_study -from google.protobuf import timestamp_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 759601dc07..396469f035 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -292,6 +292,7 @@ from .types.evaluation_service import CometInstance from .types.evaluation_service import CometResult from .types.evaluation_service import CometSpec +from .types.evaluation_service import ComputationBasedMetricSpec from .types.evaluation_service import ContentMap from .types.evaluation_service import CustomOutput from .types.evaluation_service import CustomOutputFormatConfig @@ -318,7 +319,9 @@ from .types.evaluation_service import GroundednessInstance from .types.evaluation_service import GroundednessResult from .types.evaluation_service 
import GroundednessSpec +from .types.evaluation_service import LLMBasedMetricSpec from .types.evaluation_service import Metric +from .types.evaluation_service import MetricResult from .types.evaluation_service import MetricxInput from .types.evaluation_service import MetricxInstance from .types.evaluation_service import MetricxResult @@ -341,6 +344,7 @@ from .types.evaluation_service import PointwiseMetricInstance from .types.evaluation_service import PointwiseMetricResult from .types.evaluation_service import PointwiseMetricSpec +from .types.evaluation_service import PredefinedMetricSpec from .types.evaluation_service import QuestionAnsweringCorrectnessInput from .types.evaluation_service import QuestionAnsweringCorrectnessInstance from .types.evaluation_service import QuestionAnsweringCorrectnessResult @@ -697,6 +701,8 @@ from .types.io import SharePointSources from .types.io import SlackSource from .types.io import TFRecordDestination +from .types.io import VertexMultimodalDatasetDestination +from .types.io import VertexMultimodalDatasetSource from .types.job_service import CancelBatchPredictionJobRequest from .types.job_service import CancelCustomJobRequest from .types.job_service import CancelDataLabelingJobRequest @@ -1111,6 +1117,7 @@ from .types.session import EventMetadata from .types.session import Session from .types.session import SessionEvent +from .types.session import Transcription from .types.session_service import AppendEventRequest from .types.session_service import AppendEventResponse from .types.session_service import CreateSessionOperationMetadata @@ -1560,6 +1567,7 @@ def _get_version(dependency_name): "CometSpec", "CompleteTrialRequest", "CompletionStats", + "ComputationBasedMetricSpec", "ComputeTokensRequest", "ComputeTokensResponse", "ContainerRegistryDestination", @@ -2003,6 +2011,7 @@ def _get_version(dependency_name): "JiraSource", "JobServiceClient", "JobState", + "LLMBasedMetricSpec", "LargeModelReference", "LineageSubgraph", 
"ListAnnotationsRequest", @@ -2146,6 +2155,7 @@ def _get_version(dependency_name): "MetadataServiceClient", "MetadataStore", "Metric", + "MetricResult", "MetricxInput", "MetricxInstance", "MetricxResult", @@ -2262,6 +2272,7 @@ def _get_version(dependency_name): "PostStartupScriptConfig", "PreTunedModel", "PrebuiltVoiceConfig", + "PredefinedMetricSpec", "PredefinedSplit", "PredictLongRunningMetadata", "PredictLongRunningResponse", @@ -2568,6 +2579,7 @@ def _get_version(dependency_name): "TrajectorySingleToolUseMetricValue", "TrajectorySingleToolUseResults", "TrajectorySingleToolUseSpec", + "Transcription", "Trial", "TrialContext", "TunedModel", @@ -2664,6 +2676,8 @@ def _get_version(dependency_name): "VeoTuningSpec", "VertexAISearch", "VertexAiSearchConfig", + "VertexMultimodalDatasetDestination", + "VertexMultimodalDatasetSource", "VertexRagDataServiceClient", "VertexRagServiceClient", "VertexRagStore", diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py index c69e57edea..dccbc57951 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -45,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers from google.cloud.aiplatform_v1beta1.types import annotation from google.cloud.aiplatform_v1beta1.types import annotation_spec @@ -63,10 +61,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from 
google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport from .client import DatasetServiceClient @@ -119,6 +119,8 @@ class DatasetServiceAsyncClient: parse_rag_corpus_path = staticmethod(DatasetServiceClient.parse_rag_corpus_path) saved_query_path = staticmethod(DatasetServiceClient.saved_query_path) parse_saved_query_path = staticmethod(DatasetServiceClient.parse_saved_query_path) + template_path = staticmethod(DatasetServiceClient.template_path) + parse_template_path = staticmethod(DatasetServiceClient.parse_template_path) common_billing_account_path = staticmethod( DatasetServiceClient.common_billing_account_path ) @@ -157,7 +159,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: DatasetServiceAsyncClient: The constructed client. """ - return DatasetServiceClient.from_service_account_info.__func__(DatasetServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + DatasetServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(DatasetServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -173,7 +178,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: DatasetServiceAsyncClient: The constructed client. 
""" - return DatasetServiceClient.from_service_account_file.__func__(DatasetServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + DatasetServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(DatasetServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py index fa2c36195d..d1a653e595 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py @@ -61,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers from google.cloud.aiplatform_v1beta1.types import annotation from google.cloud.aiplatform_v1beta1.types import annotation_spec @@ -79,10 +77,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO from 
.transports.grpc import DatasetServiceGrpcTransport from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport @@ -469,6 +469,28 @@ def parse_saved_query_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def template_path( + project: str, + location: str, + template: str, + ) -> str: + """Returns a fully-qualified template string.""" + return "projects/{project}/locations/{location}/templates/{template}".format( + project=project, + location=location, + template=template, + ) + + @staticmethod + def parse_template_path(path: str) -> Dict[str, str]: + """Parses a template path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/templates/(?P